shwetayakkali commented on a change in pull request #1146: HDDS-1366. Add ability in Recon to track the number of small files in an Ozone Cluster
URL: https://github.com/apache/hadoop/pull/1146#discussion_r311308069
 
 

 ##########
 File path: 
hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
 ##########
 @@ -155,70 +164,70 @@ private void fetchUpperBoundCount(String type) {
         LOG.error("Unexpected exception while updating key data : {} {}",
                 updatedKey, e.getMessage());
         return new ImmutablePair<>(getTaskName(), false);
-      } finally {
-        populateFileCountBySizeDB();
       }
+      populateFileCountBySizeDB();
     }
     LOG.info("Completed a 'process' run of FileSizeCountTask.");
     return new ImmutablePair<>(getTaskName(), true);
   }
 
   /**
    * Calculate the bin index based on size of the Key.
+   * index is calculated as the number of right shifts
+   * needed until dataSize becomes zero.
    *
    * @param dataSize Size of the key.
    * @return int bin index in upperBoundCount
    */
-  private int calcBinIndex(long dataSize) {
-    if(dataSize >= maxFileSizeUpperBound) {
-      return Integer.MIN_VALUE;
-    } else if (dataSize > SIZE_512_TB) {
-      //given the small difference in 512TB and 512TB + 1B, index for both would
-      //return same, to differentiate specific condition added.
-      return maxBinSize - 1;
-    }
-    int logValue = (int) Math.ceil(Math.log(dataSize)/Math.log(2));
-    if(logValue < 10){
-      return 0;
-    } else{
-      return (dataSize % ONE_KB == 0) ? logValue - 10 + 1: logValue - 10;
+  int calculateBinIndex(long dataSize) {
+    int index = 0;
+    while(dataSize != 0) {
+      dataSize >>= 1;
+      index += 1;
     }
+    return index < 10 ? 0 : index - 10;
   }
 
-  private void countFileSize(OmKeyInfo omKeyInfo) throws IOException{
-    int index = calcBinIndex(omKeyInfo.getDataSize());
-    if(index == Integer.MIN_VALUE) {
-      throw new IOException("File Size larger than permissible file size "
-          + maxFileSizeUpperBound +" bytes");
+  void countFileSize(OmKeyInfo omKeyInfo) {
+    int index;
+    if (omKeyInfo.getDataSize() >= maxFileSizeUpperBound) {
+      index = maxBinSize - 1;
+    } else {
+      index = calculateBinIndex(omKeyInfo.getDataSize());
     }
     upperBoundCount[index]++;
   }
 
-  private void populateFileCountBySizeDB() {
+  /**
+   * Populate DB with the counts of file sizes calculated
+   * using the dao.
+   *
+   */
+  void populateFileCountBySizeDB() {
     for (int i = 0; i < upperBoundCount.length; i++) {
       long fileSizeUpperBound = (long) Math.pow(2, (10 + i));
       FileCountBySize fileCountRecord =
           fileCountBySizeDao.findById(fileSizeUpperBound);
       FileCountBySize newRecord = new
           FileCountBySize(fileSizeUpperBound, upperBoundCount[i]);
-      if(fileCountRecord == null){
+      if (fileCountRecord == null) {
         fileCountBySizeDao.insert(newRecord);
-      } else{
+      } else {
         fileCountBySizeDao.update(newRecord);
       }
     }
   }
 
   private void updateUpperBoundCount(OmKeyInfo value, String operation)
       throws IOException {
-    int binIndex = calcBinIndex(value.getDataSize());
-    if(binIndex == Integer.MIN_VALUE) {
+    int binIndex = calculateBinIndex(value.getDataSize());
+    if (binIndex == Integer.MIN_VALUE) {
 
 Review comment:
  Yes, it was from a previous check where an exception was thrown for fileSize > the permitted value of 1 PB.
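
  For illustration only, below is a minimal standalone sketch of the shift-based binning described in the javadoc above, together with the 2^(10 + index) upper-bound key that populateFileCountBySizeDB stores for each bin. The class and method names (SmallFileBinningSketch, binIndexFor, upperBoundForBin) are hypothetical and not part of the patch; the patch also clamps sizes at or above maxFileSizeUpperBound into the last bin (maxBinSize - 1), which is omitted here.

    // Hypothetical illustration of the binning used by FileSizeCountTask;
    // a sketch, not the patch itself.
    public final class SmallFileBinningSketch {

      // The first bin covers keys up to 1 KB (2^10 bytes).
      private static final int FIRST_BIN_SHIFT = 10;

      // Count right shifts until dataSize reaches zero; anything below 1 KB
      // falls into bin 0, mirroring calculateBinIndex in the patch.
      static int binIndexFor(long dataSize) {
        int shifts = 0;
        while (dataSize != 0) {
          dataSize >>= 1;
          shifts++;
        }
        return shifts < FIRST_BIN_SHIFT ? 0 : shifts - FIRST_BIN_SHIFT;
      }

      // Upper bound in bytes used as the DB key for a bin, i.e. 2^(10 + index),
      // matching the key computed in populateFileCountBySizeDB.
      static long upperBoundForBin(int index) {
        return 1L << (FIRST_BIN_SHIFT + index);
      }

      public static void main(String[] args) {
        long[] sizes = {512L, 1024L, 1025L, 10L * 1024 * 1024};
        for (long size : sizes) {
          int index = binIndexFor(size);
          System.out.println(size + " bytes -> bin " + index
              + " (upper bound " + upperBoundForBin(index) + " bytes)");
        }
      }
    }

  Running the sketch shows that 512 bytes lands in bin 0 (upper bound 1024) while exactly 1024 bytes lands in bin 1 (upper bound 2048), which is the boundary behaviour the shift-based index produces.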
