[ https://issues.apache.org/jira/browse/HBASE-7904?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13619234#comment-13619234 ]

Ted Yu commented on HBASE-7904:
-------------------------------

I tried the following change (pass the region's conf instead of baseConf to RegionCoprocessorHost, and hand FileSystem.get() a mutable copy of the coprocessor configuration):
{code}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java    (revision 1463235)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java    (working copy)
@@ -484,7 +484,7 @@
       this.rsAccounting = this.rsServices.getRegionServerAccounting();
       // don't initialize coprocessors if not running within a regionserver
       // TODO: revisit if coprocessors should load in other cases
-      this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, baseConf);
+      this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf);
       this.metricsRegionWrapper = new MetricsRegionWrapperImpl(this);
       this.metricsRegion = new MetricsRegion(this.metricsRegionWrapper);
     } else {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java    (revision 1463235)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java    (working copy)
@@ -121,7 +121,7 @@
     baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);

     try {
-      fs = FileSystem.get(conf);
+      fs = FileSystem.get(new Configuration(conf));
       fs.mkdirs(baseStagingDir, PERM_HIDDEN);
       fs.setPermission(baseStagingDir, PERM_HIDDEN);
       //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
{code}
But I still got the "Immutable Configuration" error, now raised from the other FileSystem.get(conf) call inside the endpoint's anonymous inner class (SecureBulkLoadEndpoint$1.run(), line 224 in the trace below), which the patch above does not touch:
{code}
2013-04-01 14:43:38,337 ERROR [IPC Server handler 0 on 52052] access.SecureBulkLoadEndpoint$1(240): Failed to complete bulk load
java.lang.UnsupportedOperationException: Immutable Configuration
  at org.apache.hadoop.hbase.CompoundConfiguration.setClass(CompoundConfiguration.java:474)
  at org.apache.hadoop.ipc.RPC.setProtocolEngine(RPC.java:193)
  at org.apache.hadoop.hdfs.NameNodeProxies.createNNProxyWithClientProtocol(NameNodeProxies.java:249)
  at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:168)
  at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:129)
  at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:421)
  at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:388)
  at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:126)
  at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2312)
  at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:87)
  at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2346)
  at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2328)
  at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:352)
  at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:164)
  at org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint$1.run(SecureBulkLoadEndpoint.java:224)
  at org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint$1.run(SecureBulkLoadEndpoint.java:218)
{code}
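As a standalone illustration of the underlying problem: FileSystem.get() mutates the Configuration it is handed while building the DFS client (per the trace, RPC.setProtocolEngine() calls conf.setClass()), so any read-only Configuration subclass fails the same way CompoundConfiguration does. The sketch below is illustrative only, not HBase code; the ReadOnlyConfiguration class and the HDFS URI are made up, and hadoop-common plus hadoop-hdfs 2.0.x are assumed on the classpath:
{code}
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class ImmutableConfDemo {
  /** Toy stand-in for CompoundConfiguration's read-only behaviour. */
  static class ReadOnlyConfiguration extends Configuration {
    ReadOnlyConfiguration(Configuration base) {
      super(base);
    }
    @Override
    public void setClass(String name, Class<?> theClass, Class<?> xface) {
      throw new UnsupportedOperationException("Immutable Configuration");
    }
  }

  public static void main(String[] args) throws Exception {
    // Building the HDFS client proxy calls RPC.setProtocolEngine(), which calls
    // conf.setClass() and should throw here before any NameNode is contacted.
    FileSystem.get(URI.create("hdfs://localhost:8020"),
        new ReadOnlyConfiguration(new Configuration()));
  }
}
{code}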
I also tried copying the coprocessor configuration into a fresh Configuration via HBaseConfiguration.merge():
{code}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java    (revision 1463235)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java    (working copy)
@@ -484,7 +484,7 @@
       this.rsAccounting = this.rsServices.getRegionServerAccounting();
       // don't initialize coprocessors if not running within a regionserver
       // TODO: revisit if coprocessors should load in other cases
-      this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, baseConf);
+      this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf);
       this.metricsRegionWrapper = new MetricsRegionWrapperImpl(this);
       this.metricsRegion = new MetricsRegion(this.metricsRegionWrapper);
     } else {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java    (revision 1463235)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java    (working copy)
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -121,7 +122,9 @@
     baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);

     try {
-      fs = FileSystem.get(conf);
+      Configuration clone = new Configuration();
+      HBaseConfiguration.merge(clone, conf);
+      fs = FileSystem.get(clone);
       fs.mkdirs(baseStagingDir, PERM_HIDDEN);
       fs.setPermission(baseStagingDir, PERM_HIDDEN);
       //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
{code}
which led to a failure even earlier, while SecureBulkLoadEndpoint was being started during region open:
{code}
2013-04-01 14:58:10,084 ERROR [RS_OPEN_META-10.10.8.31,52478,1364853486544-0] handler.OpenRegionHandler(463): Failed open of region=.META.,,1.1028785192, starting to roll back the global memstore size.
java.lang.IllegalStateException: Could not instantiate a region instance.
  at org.apache.hadoop.hbase.regionserver.HRegion.newHRegion(HRegion.java:3765)
  at org.apache.hadoop.hbase.regionserver.HRegion.openHRegion(HRegion.java:4024)
  at org.apache.hadoop.hbase.regionserver.HRegion.openHRegion(HRegion.java:3975)
  at org.apache.hadoop.hbase.regionserver.HRegion.openHRegion(HRegion.java:3926)
  at org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler.openRegion(OpenRegionHandler.java:447)
  at org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler.process(OpenRegionHandler.java:129)
  at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:130)
  at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:895)
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:918)
  at java.lang.Thread.run(Thread.java:680)
Caused by: java.lang.reflect.InvocationTargetException
  at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
  at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:39)
  at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:27)
  at java.lang.reflect.Constructor.newInstance(Constructor.java:513)
  at org.apache.hadoop.hbase.regionserver.HRegion.newHRegion(HRegion.java:3762)
  ... 9 more
Caused by: java.lang.UnsupportedOperationException: Immutable Configuration
  at org.apache.hadoop.hbase.CompoundConfiguration.iterator(CompoundConfiguration.java:437)
  at org.apache.hadoop.hbase.HBaseConfiguration.merge(HBaseConfiguration.java:134)
  at org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint.start(SecureBulkLoadEndpoint.java:126)
  at org.apache.hadoop.hbase.coprocessor.CoprocessorHost$Environment.startup(CoprocessorHost.java:669)
  at org.apache.hadoop.hbase.coprocessor.CoprocessorHost.loadInstance(CoprocessorHost.java:328)
  at org.apache.hadoop.hbase.coprocessor.CoprocessorHost.loadSystemCoprocessors(CoprocessorHost.java:154)
  at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.<init>(RegionCoprocessorHost.java:145)
  at org.apache.hadoop.hbase.regionserver.HRegion.<init>(HRegion.java:487)
{code}
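So the second attempt only moves the "Immutable Configuration" error from FileSystem.get() into HBaseConfiguration.merge() itself: merge() iterates the entries of the source configuration, and CompoundConfiguration.iterator() (CompoundConfiguration.java:437 in the trace) throws the same exception. A rough sketch of that shape, paraphrased from the stack trace rather than copied from the 0.95 source:
{code}
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public final class MergeSketch {
  /** Approximate shape of HBaseConfiguration.merge(destConf, srcConf). */
  public static void merge(Configuration destConf, Configuration srcConf) {
    // Configuration implements Iterable<Map.Entry<String, String>>, so this
    // loop calls srcConf.iterator() -- the call CompoundConfiguration rejects.
    for (Map.Entry<String, String> e : srcConf) {
      destConf.set(e.getKey(), e.getValue());
    }
  }
}
{code}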
                
> Make mapreduce jobs pass based on 2.0.4-alpha
> ---------------------------------------------
>
>                 Key: HBASE-7904
>                 URL: https://issues.apache.org/jira/browse/HBASE-7904
>             Project: HBase
>          Issue Type: Task
>            Reporter: Ted Yu
>            Assignee: Ted Yu
>            Priority: Critical
>             Fix For: 0.95.0, 0.98.0
>
>         Attachments: 7904-addendum.txt, 7904.txt, 7904-v2-hadoop-2.0.txt, 
> 7904-v2.txt, 7904-v4-hadoop-2.0.txt, 7904-v4.txt, 7904-v4.txt, 
> 7904-v5-hadoop-2.0.txt, 7904-v5.txt, 7904-v6-hadoop-2.0.txt, 
> 7904-v7-hadoop-2.0.txt, 7904-v8-hadoop-2.0.txt, 7904-v8.txt, 
> 7904-v9-hadoop-2.0.txt, 7904-v9.txt, hbase-7904-v3.txt
>
>
> 2.0.3-alpha has been released.
> We should upgrade the dependency.
