jhungund commented on code in PR #6250:
URL: https://github.com/apache/hbase/pull/6250#discussion_r1768700232


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java:
##########
@@ -1626,52 +1628,33 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException
   }
 
   private void persistChunkedBackingMap(FileOutputStream fos) throws IOException {
-    long numChunks = backingMap.size() / persistenceChunkSize;
-    if (backingMap.size() % persistenceChunkSize != 0) {
-      numChunks += 1;
-    }
-
     LOG.debug(
       "persistToFile: before persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, chunkSize: {}, numberofChunks: {}",
-      backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize, numChunks);
+        + "fullycachedFiles size: {}, chunkSize: {}", backingMap.size(), fullyCachedFiles.size(),
+      persistenceChunkSize);
 
-    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize, numChunks);
+    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize);
 
     LOG.debug(
-      "persistToFile: after persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, numChunksPersisteed: {}",
-      backingMap.size(), fullyCachedFiles.size(), numChunks);
+      "persistToFile: after persisting backing map size: {}, " + 
"fullycachedFiles size: {}",
+      backingMap.size(), fullyCachedFiles.size());
   }
 
-  private void retrieveChunkedBackingMap(FileInputStream in, int[] bucketSizes) throws IOException {
-    byte[] bytes = new byte[Long.BYTES];
-    int readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size of chunk-size read from persistence: 
" + readSize);
-    }
-    long batchSize = Bytes.toLong(bytes, 0);
-
-    readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size for number of chunks read from 
persistence: " + readSize);
-    }
-    long numChunks = Bytes.toLong(bytes, 0);
-
-    LOG.info("Number of chunks: {}, chunk size: {}", numChunks, batchSize);
+  private void retrieveChunkedBackingMap(FileInputStream in) throws IOException {
 
     // Read the first chunk that has all the details.
     BucketCacheProtos.BucketCacheEntry firstChunk =
       BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in);
     parseFirstChunk(firstChunk);
 
     // Subsequent chunks have the backingMap entries.
-    for (int i = 1; i < numChunks; i++) {
-      LOG.info("Reading chunk no: {}", i + 1);
+    int numChunks = 0;
+    while (in.available() > 0) {
       parseChunkPB(BucketCacheProtos.BackingMap.parseDelimitedFrom(in),
         firstChunk.getDeserializersMap());
-      LOG.info("Retrieved chunk: {}", i + 1);
+      numChunks++;
     }

Review Comment:
   ack!
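
For readers following the thread: the new loop no longer persists an explicit chunk count, it simply parses length-delimited protobuf messages until the stream runs dry. A minimal, self-contained sketch of that pattern (not HBase code; protobuf's `StringValue` wrapper stands in for the real `BucketCacheProtos.BackingMap` chunk message):

```java
import com.google.protobuf.StringValue;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class DelimitedChunksSketch {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("chunks", ".bin");

    // Writer side: each chunk is one length-delimited message; no chunk count is stored.
    try (FileOutputStream fos = new FileOutputStream(f)) {
      for (int i = 0; i < 3; i++) {
        StringValue.of("chunk-" + i).writeDelimitedTo(fos);
      }
    }

    // Reader side: keep parsing until no bytes remain, mirroring the
    // while (in.available() > 0) loop in the change above.
    int numChunks = 0;
    try (FileInputStream in = new FileInputStream(f)) {
      while (in.available() > 0) {
        StringValue chunk = StringValue.parseDelimitedFrom(in);
        System.out.println("read " + chunk.getValue());
        numChunks++;
      }
    }
    System.out.println("chunks read: " + numChunks);
  }
}
```

The real retrieveChunkedBackingMap() does the same thing with BucketCacheProtos.BackingMap.parseDelimitedFrom(in), after the first delimited message (the full BucketCacheEntry) has been consumed.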



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java:
##########
@@ -62,42 +62,55 @@ static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache,
       .build();
   }
 
-  public static void serializeAsPB(BucketCache cache, FileOutputStream fos, long chunkSize,
-    long numChunks) throws IOException {
+  public static void serializeAsPB(BucketCache cache, FileOutputStream fos, long chunkSize)
+    throws IOException {
+    // Write the new version of magic number.
+    fos.write(PB_MAGIC_V2);
+
     int blockCount = 0;
-    int chunkCount = 0;
     int backingMapSize = cache.backingMap.size();
     BucketCacheProtos.BackingMap.Builder builder = BucketCacheProtos.BackingMap.newBuilder();
-
-    fos.write(PB_MAGIC_V2);
-    fos.write(Bytes.toBytes(chunkSize));
-    fos.write(Bytes.toBytes(numChunks));
-
     BucketCacheProtos.BackingMapEntry.Builder entryBuilder =
       BucketCacheProtos.BackingMapEntry.newBuilder();
-    for (Map.Entry<BlockCacheKey, BucketEntry> entry : cache.backingMap.entrySet()) {
-      blockCount++;
-      entryBuilder.clear();
-      entryBuilder.setKey(BucketProtoUtils.toPB(entry.getKey()));
-      entryBuilder.setValue(BucketProtoUtils.toPB(entry.getValue()));
-      builder.addEntry(entryBuilder.build());
+    Iterator<Map.Entry<BlockCacheKey, BucketEntry>> entrySetIter =
+      cache.backingMap.entrySet().iterator();
 
+    // Create the first chunk and persist all details along with it.
+    while (entrySetIter.hasNext()) {
+      blockCount++;
+      Map.Entry<BlockCacheKey, BucketEntry> entry = entrySetIter.next();
+      addToBuilder(entry, entryBuilder, builder);
       if (blockCount % chunkSize == 0 || (blockCount == backingMapSize)) {
-        chunkCount++;
-        if (chunkCount == 1) {
-          // Persist all details along with the first chunk into BucketCacheEntry
           BucketProtoUtils.toPB(cache, builder.build()).writeDelimitedTo(fos);
-        } else {
-          // Directly persist subsequent backing-map chunks.
+          break;
+      }
+    }
+    builder.clear();

Review Comment:
   ack!
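
On the write side, for reference, the loop above accumulates backing-map entries into a builder and flushes a delimited chunk every `chunkSize` entries, with the first chunk additionally carrying the full `BucketCacheEntry`. A simplified, hypothetical sketch of just the flush cadence (again not HBase code; protobuf's `ListValue` stands in for the real `BackingMap` message):

```java
import com.google.protobuf.ListValue;
import com.google.protobuf.Value;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;

public class ChunkedWriteSketch {
  // Illustrative only: emit one length-delimited "chunk" per chunkSize entries,
  // plus a trailing partial chunk, mirroring the blockCount % chunkSize == 0 flush.
  static void writeChunks(Iterable<String> keys, long chunkSize, OutputStream out)
    throws IOException {
    ListValue.Builder chunk = ListValue.newBuilder();
    long count = 0;
    for (String key : keys) {
      chunk.addValues(Value.newBuilder().setStringValue(key).build());
      count++;
      if (count % chunkSize == 0) {
        chunk.build().writeDelimitedTo(out); // flush a full chunk
        chunk.clear();
      }
    }
    if (chunk.getValuesCount() > 0) {
      chunk.build().writeDelimitedTo(out); // flush the trailing partial chunk
    }
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("backingmap", ".bin");
    try (OutputStream out = new FileOutputStream(f)) {
      writeChunks(List.of("a", "b", "c", "d", "e"), 2, out);
    }
  }
}
```

The actual serializeAsPB() additionally writes PB_MAGIC_V2 up front and wraps the first chunk in a BucketCacheEntry via BucketProtoUtils.toPB(cache, ...), as the diff shows.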


