This is an automated email from the ASF dual-hosted git repository.

abstractdog pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 127c1b0558c HIVE-29478: FileSinkOperator shouldn't check for isMMTable for every row being processed (#6341)
127c1b0558c is described below

commit 127c1b0558c3c5b7b24b7a039d3692650ecf1baa
Author: Naresh P R <[email protected]>
AuthorDate: Fri Mar 6 05:03:05 2026 -0500

    HIVE-29478: FileSinkOperator shouldn't check for isMMTable for every row being processed (#6341)
---
 .../org/apache/hadoop/hive/ql/exec/FileSinkOperator.java    |  6 ++++--
 ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java     | 13 +++++++++----
 .../java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java   |  4 +---
 .../apache/hadoop/hive/metastore/utils/MetaStoreUtils.java  |  4 ++++
 4 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 6a57672207f..2c038d17b83 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -555,6 +555,7 @@ public int createDynamicBucket(int bucketNum) {
   protected BitSet filesCreatedPerBucket = new BitSet();
 
   protected boolean isCompactionTable = false;
+  protected boolean isMmTable = false;
 
   private void initializeSpecPath() {
     // For a query of the type:
@@ -625,6 +626,7 @@ protected void initializeOp(Configuration hconf) throws 
HiveException {
       multiFileSpray = conf.isMultiFileSpray();
       this.isBucketed = hconf.getInt(hive_metastoreConstants.BUCKET_COUNT, 0) 
> 0;
       this.isCompactionTable = conf.isCompactionTable();
+      this.isMmTable = conf.isMmTable();
       totalFiles = conf.getTotalFiles();
       numFiles = conf.getNumFiles();
       dpCtx = conf.getDynPartCtx();
@@ -1189,7 +1191,7 @@ public void process(Object row, int tag) throws 
HiveException {
       // for a given operator branch prediction should work quite nicely on it.
       // RecordUpdater expects to get the actual row, not a serialized version 
of it.  Thus we
       // pass the row rather than recordValue.
-      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || 
conf.isMmTable() || isCompactionTable) {
+      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || isMmTable || 
isCompactionTable) {
         writerOffset = bucketId;
         if (!isCompactionTable) {
           writerOffset = findWriterOffset(row);
@@ -1274,7 +1276,7 @@ private void closeRecordwriters(boolean abort) {
   protected boolean areAllTrue(boolean[] statsFromRW) {
     // If we are doing an acid operation they will always all be true as 
RecordUpdaters always
     // collect stats
-    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && 
!conf.isMmTable() && !isCompactionTable) {
+    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && !isMmTable && 
!isCompactionTable) {
       return true;
     }
     for(boolean b : statsFromRW) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 00909c6d010..8f3101902b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -59,7 +59,6 @@
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.conf.Configuration;
@@ -483,7 +482,8 @@ private BucketMetaData(int bucketId, int copyNumber) {
    * @return true, if the tblProperties contains {@link 
AcidUtils#COMPACTOR_TABLE_PROPERTY}
    */
   public static boolean isCompactionTable(Properties tblProperties) {
-    return tblProperties != null && 
isCompactionTable(Maps.fromProperties(tblProperties));
+    return tblProperties != null &&
+        StringUtils.isNotBlank((String) 
tblProperties.get(COMPACTOR_TABLE_PROPERTY));
   }
 
   /**
@@ -1948,7 +1948,11 @@ private static boolean isDirUsable(Path child, long 
visibilityTxnId, List<Path>
   }
 
   public static boolean isTablePropertyTransactional(Properties props) {
-    return isTablePropertyTransactional(Maps.fromProperties(props));
+    String resultStr = (String) 
props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    if (resultStr == null) {
+      resultStr = (String) 
props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
+    }
+    return Boolean.parseBoolean(resultStr);
   }
 
   public static boolean isTablePropertyTransactional(Map<String, String> 
parameters) {
@@ -2205,7 +2209,8 @@ public static boolean isInsertOnlyTable(Table table) {
   }
 
   public static boolean isInsertOnlyTable(Properties params) {
-    return isInsertOnlyTable(Maps.fromProperties(params));
+    String transactionalProp = (String) 
params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return 
INSERTONLY_TRANSACTIONAL_PROPERTY.equalsIgnoreCase(transactionalProp);
   }
 
    /**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 5b5685df164..b9410a9bd9b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -24,7 +24,6 @@
 import java.util.Objects;
 import java.util.Set;
 
-import com.google.common.collect.Maps;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -375,8 +374,7 @@ public boolean isIcebergTable() {
     if (getTable() != null) {
       return DDLUtils.isIcebergTable(table);
     } else { 
-      return MetaStoreUtils.isIcebergTable(
-          Maps.fromProperties(getTableInfo().getProperties()));
+      return MetaStoreUtils.isIcebergTable(getTableInfo().getProperties());
     }
   }
 
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index 70c272fc8ee..5fdb6a49f80 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -302,6 +302,10 @@ public static boolean isIcebergTable(Map<String, String> 
params) {
     return 
HiveMetaHook.ICEBERG.equalsIgnoreCase(params.get(HiveMetaHook.TABLE_TYPE));
   }
 
+  public static boolean isIcebergTable(Properties params) {
+    return HiveMetaHook.ICEBERG.equalsIgnoreCase((String) 
params.get(HiveMetaHook.TABLE_TYPE));
+  }
+
   public static boolean isTranslatedToExternalTable(Table table) {
     Map<String, String> params = table.getParameters();
     return params != null && MetaStoreUtils.isPropertyTrue(params, 
HiveMetaHook.EXTERNAL)

Reply via email to