wchevreuil commented on code in PR #6250:
URL: https://github.com/apache/hbase/pull/6250#discussion_r1771646171


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java:
##########
@@ -1626,55 +1618,42 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio
   }
 
   private void persistChunkedBackingMap(FileOutputStream fos) throws IOException {
-    long numChunks = backingMap.size() / persistenceChunkSize;
-    if (backingMap.size() % persistenceChunkSize != 0) {
-      numChunks += 1;
-    }
-
     LOG.debug(
       "persistToFile: before persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, chunkSize: {}, numberofChunks: {}",
-      backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize, numChunks);
+        + "fullycachedFiles size: {}, chunkSize: {}",
+      backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize);
 
-    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize, numChunks);
+    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize);
 
     LOG.debug(
-      "persistToFile: after persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, numChunksPersisteed: {}",
-      backingMap.size(), fullyCachedFiles.size(), numChunks);
+      "persistToFile: after persisting backing map size: {}, " + 
"fullycachedFiles size: {}",
+      backingMap.size(), fullyCachedFiles.size());
   }
 
-  private void retrieveChunkedBackingMap(FileInputStream in, int[] bucketSizes) throws IOException {
-    byte[] bytes = new byte[Long.BYTES];
-    int readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size of chunk-size read from persistence: 
" + readSize);
-    }
-    long batchSize = Bytes.toLong(bytes, 0);
-
-    readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size for number of chunks read from 
persistence: " + readSize);
-    }
-    long numChunks = Bytes.toLong(bytes, 0);
-
-    LOG.info("Number of chunks: {}, chunk size: {}", numChunks, batchSize);
+  private void retrieveChunkedBackingMap(FileInputStream in) throws IOException {
 
     // Read the first chunk that has all the details.
-    BucketCacheProtos.BucketCacheEntry firstChunk =
+    BucketCacheProtos.BucketCacheEntry cacheEntry =
       BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in);
-    parseFirstChunk(firstChunk);
-
-    // Subsequent chunks have the backingMap entries.
-    for (int i = 1; i < numChunks; i++) {
-      LOG.info("Reading chunk no: {}", i + 1);
-      parseChunkPB(BucketCacheProtos.BackingMap.parseDelimitedFrom(in),
-        firstChunk.getDeserializersMap());
-      LOG.info("Retrieved chunk: {}", i + 1);
-    }
-    verifyFileIntegrity(firstChunk);
-    verifyCapacityAndClasses(firstChunk.getCacheCapacity(), firstChunk.getIoClass(),
-      firstChunk.getMapClass());
+
+    fullyCachedFiles.clear();
+    fullyCachedFiles.putAll(BucketProtoUtils.fromPB(cacheEntry.getCachedFilesMap()));

Review Comment:
   Indeed, thanks for confirming.
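   
   For anyone skimming the diff: the explicit chunk count (and the hand-rolled Long.BYTES header) can be dropped because protobuf's parseDelimitedFrom returns null at end-of-stream, so delimited chunks are self-terminating. A minimal sketch of such an EOF-driven read loop, reusing parseChunkPB and getDeserializersMap as they appear in this class (the loop itself is illustrative, not the exact code in this PR):
   
   ```java
   // First delimited message carries the metadata (deserializers, cached files, etc.).
   BucketCacheProtos.BucketCacheEntry cacheEntry =
     BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in);
   
   // Drain any backing-map chunks that follow without knowing their count up
   // front: parseDelimitedFrom returns null once the stream is exhausted.
   BucketCacheProtos.BackingMap chunk;
   while ((chunk = BucketCacheProtos.BackingMap.parseDelimitedFrom(in)) != null) {
     parseChunkPB(chunk, cacheEntry.getDeserializersMap());
   }
   ```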


