klcopp commented on a change in pull request #1085:
URL: https://github.com/apache/hive/pull/1085#discussion_r444709390



##########
File path: ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
##########
@@ -361,18 +361,25 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidWriteIdLi
          HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD) :
          Float.parseFloat(deltaPctProp);
      boolean bigEnough =   (float)deltaSize/(float)baseSize > deltaPctThreshold;
+      boolean multiBase = dir.getObsolete().stream()
+              .filter(path -> path.getName().startsWith(AcidUtils.BASE_PREFIX)).findAny().isPresent();
+
       if (LOG.isDebugEnabled()) {
         StringBuilder msg = new StringBuilder("delta size: ");
         msg.append(deltaSize);
         msg.append(" base size: ");
         msg.append(baseSize);
+        msg.append(" multiBase ");
+        msg.append(multiBase);
+        msg.append(" deltaSize ");
+        msg.append(deltaSize);
         msg.append(" threshold: ");
         msg.append(deltaPctThreshold);
         msg.append(" will major compact: ");
-        msg.append(bigEnough);
+        msg.append(bigEnough || (deltaSize == 0  && multiBase));

Review comment:
       Nit: I think this is misleading, and unnecessary since we have already logged the values of deltaSize and multiBase.
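       E.g. (just a sketch of what I mean, not a blocker): since those values are printed right above, this line could simply stay as it was:

         msg.append(bigEnough);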

##########
File path: ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
##########
@@ -1031,6 +1031,34 @@ private ShowCompactResponseElement generateElement(long id, String db, String ta
     return element;
   }
 
+  @Test
+  public void compactTableWithMultipleBase() throws Exception {
+    Table t = newTable("default", "nctdpnhe", false);
+
+    addBaseFile(t, null, 50L, 50);
+    addBaseFile(t, null, 100L, 50);
+
+    burnThroughTransactions("default", "nctdpnhe", 102);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
+    comp.setTablename("nctdpnhe");
+    comp.setOperationType(DataOperationType.UPDATE);
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    long writeid = allocateWriteId("default", "nctdpnhe", txnid);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(1, rsp.getCompactsSize());
+    Assert.assertEquals("initiated",rsp.getCompacts().get(0).getState());
+  }

Review comment:
       opportunity: add:
   startWorker();
   Assert.assertEquals("ready for cleaning",rsp.getCompacts().get(0).getState());

##########
File path: 
ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
##########
@@ -1040,4 +1068,4 @@ boolean useHive130DeltaDirName() {
   public void tearDown() throws Exception {
     compactorTestCleanup();
   }
-}
+}

Review comment:
       nit: no newline at end of file




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
