anoopsjohn commented on a change in pull request #2167:
URL: https://github.com/apache/hbase/pull/2167#discussion_r464174854



##########
File path: hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
##########
@@ -222,6 +222,7 @@ public RegionLocator getRegionLocator() {
       private final Map<byte[], WriterLength> writers = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       private final Map<byte[], byte[]> previousRows = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       private final long now = EnvironmentEdgeManager.currentTime();
+      private byte[] tableNameBytes = Bytes.toBytes(writeTableNames);

Review comment:
       Should we do this under the writeMultipleTables check?
       private byte[] tableNameBytes = (writeMultipleTables) ? null : Bytes.toBytes(writeTableNames);
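
       For illustration only, a minimal self-contained sketch of the guard this
       comment proposes. The wrapper class and constructor are hypothetical
       scaffolding; Bytes.toBytes is the real HBase utility, and the null value
       is presumably what signals "no single target table" in multi-table mode:

         import org.apache.hadoop.hbase.util.Bytes;

         public class TableNameGuardSketch {
           // null signals "resolve the table per row" in multi-table mode,
           // since writeTableNames does not name a single table there.
           private final byte[] tableNameBytes;

           TableNameGuardSketch(boolean writeMultipleTables, String writeTableNames) {
             this.tableNameBytes =
               writeMultipleTables ? null : Bytes.toBytes(writeTableNames);
           }
         }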

##########
File path: hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
##########
@@ -274,39 +270,36 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException {
 
         // create a new WAL writer, if necessary
         if (wl == null || wl.writer == null) {
+          InetSocketAddress[] favoredNodes = null;
           if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
             HRegionLocation loc = null;
-
+            String tableName = Bytes.toString(tableNameBytes);
             if (tableName != null) {
               try (Connection connection = ConnectionFactory.createConnection(conf);
-                     RegionLocator locator =
-                       connection.getRegionLocator(TableName.valueOf(tableName))) {
+                RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) {
                 loc = locator.getRegionLocation(rowKey);
               } catch (Throwable e) {
-                LOG.warn("Something wrong locating rowkey {} in {}",
-                  Bytes.toString(rowKey), tableName, e);
+                LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey),
+                  tableName, e);
                 loc = null;
-              } }
-
+              }
+            }

Review comment:
       Here also: should this be under the writeMultipleTables check as well?
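
       To make the suggestion concrete, a hedged sketch of the lookup with the
       same guard applied. RegionLocationSketch and locateOrNull are
       hypothetical names; the HBase client calls are the ones the diff
       already uses:

         import org.apache.hadoop.conf.Configuration;
         import org.apache.hadoop.hbase.HRegionLocation;
         import org.apache.hadoop.hbase.TableName;
         import org.apache.hadoop.hbase.client.Connection;
         import org.apache.hadoop.hbase.client.ConnectionFactory;
         import org.apache.hadoop.hbase.client.RegionLocator;
         import org.apache.hadoop.hbase.util.Bytes;

         public final class RegionLocationSketch {
           /** Returns the row's region location, or null in multi-table mode or on failure. */
           static HRegionLocation locateOrNull(Configuration conf, byte[] tableNameBytes,
               byte[] rowKey) {
             if (tableNameBytes == null) {
               return null; // multi-table mode: no single table to locate against
             }
             String tableName = Bytes.toString(tableNameBytes);
             try (Connection connection = ConnectionFactory.createConnection(conf);
                 RegionLocator locator =
                     connection.getRegionLocator(TableName.valueOf(tableName))) {
               return locator.getRegionLocation(rowKey);
             } catch (Throwable e) {
               return null; // mirror the patch: fall back to no favored nodes
             }
           }
         }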

##########
File path: hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
##########
@@ -274,39 +270,36 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException {
 
         // create a new WAL writer, if necessary
         if (wl == null || wl.writer == null) {
+          InetSocketAddress[] favoredNodes = null;
           if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
             HRegionLocation loc = null;
-
+            String tableName = Bytes.toString(tableNameBytes);

Review comment:
       Looks like a formatting issue here?

##########
File path: hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
##########
@@ -376,16 +369,15 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration
         DataBlockEncoding encoding = overriddenEncoding;
         encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding;
         encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
-        HFileContextBuilder contextBuilder = new HFileContextBuilder()
-          .withCompression(compression).withChecksumType(HStore.getChecksumType(conf))
-          .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize)
-          .withColumnFamily(family).withTableName(tableName);
+        HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression)

Review comment:
       Please check the formatting of these changed/added lines.
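
       For reference, one conventional wrapping that should satisfy the
       line-length rule, with one builder call per continuation line. The
       wrapper class and method are hypothetical; the builder calls are taken
       from the diff:

         import org.apache.hadoop.conf.Configuration;
         import org.apache.hadoop.hbase.io.compress.Compression;
         import org.apache.hadoop.hbase.io.hfile.HFileContext;
         import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
         import org.apache.hadoop.hbase.regionserver.HStore;

         final class ContextBuilderFormatSketch {
           // One .withX(...) per line keeps each line well under the limit.
           static HFileContext build(Configuration conf, Compression.Algorithm compression,
               int blockSize, byte[] family, byte[] tableName) {
             return new HFileContextBuilder()
                 .withCompression(compression)
                 .withChecksumType(HStore.getChecksumType(conf))
                 .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
                 .withBlockSize(blockSize)
                 .withColumnFamily(family)
                 .withTableName(tableName)
                 .build();
           }
         }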




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

