[hive] branch master updated: HIVE-25603: Restore original method signature in VectorizedOrcAcidRowBatchReader.getDeleteDeltaDirsFromSplit (Denys Kuzmenko, reviewed by Peter Vary)

2021-10-13 Thread dkuzmenko
This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 3cf119f  HIVE-25603: Restore original method signature in VectorizedOrcAcidRowBatchReader.getDeleteDeltaDirsFromSplit (Denys Kuzmenko, reviewed by Peter Vary)
3cf119f is described below

commit 3cf119f3f73278864142a7cbabb7da3fbe0b4b50
Author: Denys Kuzmenko 
AuthorDate: Wed Oct 13 18:07:01 2021 +0300

HIVE-25603: Restore original method signature in VectorizedOrcAcidRowBatchReader.getDeleteDeltaDirsFromSplit (Denys Kuzmenko, reviewed by Peter Vary)

Closes #2708
---
 .../hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index eed4f2b..2b70378 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.common.io.CacheTag;
-import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.IllegalCacheConfigurationException;
@@ -858,8 +857,13 @@ public class VectorizedOrcAcidRowBatchReader
     return false;
   }
 
+  @Deprecated
+  static Path[] getDeleteDeltaDirsFromSplit(OrcSplit orcSplit) {
+    return getDeleteDeltaDirsFromSplit(orcSplit, null);
+  }
+
   static Path[] getDeleteDeltaDirsFromSplit(OrcSplit orcSplit,
-      Map pathToDeltaMetaData) throws IOException {
+      Map pathToDeltaMetaData) {
     Path path = orcSplit.getPath();
     Path root;
     if (orcSplit.hasBase()) {
@@ -1378,7 +1382,7 @@ public class VectorizedOrcAcidRowBatchReader
   /**
    * see {@link BucketCodec}
    */
-  private int bucketProperty; 
+  private int bucketProperty;
   private long rowId;
   DeleteRecordKey() {
     this.originalWriteId = -1;
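
For readers outside the Hive codebase: the hunk above applies the standard recipe for restoring a removed public signature without duplicating logic, i.e. reintroduce the old signature, mark it @Deprecated, and delegate to the new overload with a default argument. A minimal standalone sketch of that pattern (class and method names here are hypothetical, not Hive's):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;

class DeltaDirs {

  // Restored old entry point: kept so callers compiled against the previous
  // API keep working; it simply forwards to the newer overload.
  @Deprecated
  static Path[] deleteDeltaDirs(Path splitRoot) {
    return deleteDeltaDirs(splitRoot, null);
  }

  // Newer entry point that may additionally consult per-path delta metadata.
  static Path[] deleteDeltaDirs(Path splitRoot, Map<String, String> pathToDeltaMetaData) {
    // The real method enumerates delete-delta directories under the split's
    // root; returning just the root keeps this sketch self-contained.
    return new Path[] { splitRoot };
  }

  public static void main(String[] args) {
    Path[] viaOldApi = deleteDeltaDirs(Paths.get("/warehouse/t"));           // deprecated route
    Path[] viaNewApi = deleteDeltaDirs(Paths.get("/warehouse/t"), Map.of()); // current route
    System.out.println(viaOldApi.length + " " + viaNewApi.length);
  }
}

Note the null default in the deprecated overload: it preserves the old behavior exactly, while new callers can pass real metadata.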


[hive] branch master updated: HIVE-25532: Fixing authorization for Kill Query command. (#2649) (Abhay Chennagiri reviewed by Saihemanth Gantasala and Zoltan Haindrich)

2021-10-13 Thread kgyrtkirk
This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new f53bb7c  HIVE-25532: Fixing authorization for Kill Query command. (#2649) (Abhay Chennagiri reviewed by Saihemanth Gantasala and Zoltan Haindrich)
f53bb7c is described below

commit f53bb7cefe64cd652b48bb802eaf0716f84fa592
Author: achennagiri <77031092+achennag...@users.noreply.github.com>
AuthorDate: Wed Oct 13 04:54:09 2021 -0700

HIVE-25532: Fixing authorization for Kill Query command. (#2649) (Abhay Chennagiri reviewed by Saihemanth Gantasala and Zoltan Haindrich)
---
 .../plugin/TestHiveAuthorizerCheckInvocation.java  | 40 +-
 .../apache/hive/service/server/KillQueryImpl.java  |  7 +++-
 2 files changed, 45 insertions(+), 2 deletions(-)

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
index 13656c5..ee6925d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
 import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.Registry;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
@@ -47,6 +48,10 @@ import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.apache.hive.service.cli.operation.OperationManager;
+import org.apache.hive.service.server.KillQueryImpl;
+import org.apache.hive.service.server.KillQueryZookeeperManager;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -64,6 +69,7 @@ public class TestHiveAuthorizerCheckInvocation {
   private final Logger LOG = LoggerFactory.getLogger(this.getClass().getName());;
   protected static HiveConf conf;
   protected static Driver driver;
+  protected static SessionState ss;
   private static final String tableName = TestHiveAuthorizerCheckInvocation.class.getSimpleName()
       + "Table";
   private static final String viewName = TestHiveAuthorizerCheckInvocation.class.getSimpleName()
@@ -102,10 +108,17 @@ public class TestHiveAuthorizerCheckInvocation {
     conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName());
     conf.setBoolVar(ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED, true);
     conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
+    conf.setBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE, true);
+    conf.setBoolVar(ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE, false);
 
     TestTxnDbUtil.prepDb(conf);
 
-    SessionState.start(conf);
+    SessionState ss = SessionState.start(conf);
+    OperationManager operationManager = Mockito.mock(OperationManager.class);
+    KillQueryZookeeperManager killQueryZookeeperManager = Mockito.mock(KillQueryZookeeperManager.class);
+    KillQueryImpl killQueryImpl = new KillQueryImpl(operationManager, killQueryZookeeperManager);
+    ss.setKillQuery(killQueryImpl);
+
     driver = new Driver(conf);
     runCmd("create table " + tableName
         + " (i int, j int, k string) partitioned by (city string, `date` string) ");
@@ -676,4 +689,29 @@ public class TestHiveAuthorizerCheckInvocation {
         inputsCapturer.getValue(), outputsCapturer.getValue());
   }
 
+  /**
+   * Unit test for HIVE-25532.
+   * Checks if the right privilege objects are being sent when a kill query call is made.
+   * @throws Exception
+   */
+  @Test
+  public void testKillQueryAuthorization() throws Exception {
+    int queryStatus = driver.compile("select " + viewName + ".i, " + tableName + ".city from "
+        + viewName + " join " + tableName + " on " + viewName + ".city = " + tableName
+        + ".city where " + tableName + ".k = 'X'", true);
+    assertEquals(0, queryStatus);
+
+    resetAuthorizer();
+    QueryState queryState = driver.getQueryState();
+    String queryId = queryState.getQueryId();
+    int killQueryStatus = driver.compile("kill query '" + queryId + "'", true);
+    assertEquals(0, killQueryStatus);
+    driver.run();
+
+    List inputs = 
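
The setUp changes above replace the collaborators that would normally need a live HiveServer2 and ZooKeeper with Mockito mocks, then install the composed kill-query handler on the session. A self-contained sketch of that wiring pattern, using hypothetical stand-in types rather than Hive's classes (assumes mockito-core on the classpath):

import static org.mockito.Mockito.mock;

class KillQueryWiringSketch {

  interface OperationRegistry { }   // stand-in for Hive's OperationManager
  interface ZkKillCoordinator { }   // stand-in for KillQueryZookeeperManager

  // Stand-in for KillQueryImpl: just composes the two collaborators.
  static class KillQueryHandler {
    final OperationRegistry registry;
    final ZkKillCoordinator zk;
    KillQueryHandler(OperationRegistry registry, ZkKillCoordinator zk) {
      this.registry = registry;
      this.zk = zk;
    }
  }

  // Stand-in for the session object that exposes setKillQuery(...).
  static class Session {
    private KillQueryHandler killQuery;
    void setKillQuery(KillQueryHandler kq) { this.killQuery = kq; }
    KillQueryHandler getKillQuery() { return killQuery; }
  }

  public static void main(String[] args) {
    // Anything that would touch HS2 or ZooKeeper is mocked out, so the
    // authorization path can be exercised entirely in-process.
    OperationRegistry registry = mock(OperationRegistry.class);
    ZkKillCoordinator zk = mock(ZkKillCoordinator.class);

    Session session = new Session();
    session.setKillQuery(new KillQueryHandler(registry, zk));
    System.out.println(session.getKillQuery() != null); // prints: true
  }
}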

[hive] branch master updated: HIVE-25610: Handle partition field comments for Iceberg tables (Peter Vary reviewed by Marton Bod)(#2715)

2021-10-13 Thread pvary
This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 65d03fc  HIVE-25610: Handle partition field comments for Iceberg tables (Peter Vary reviewed by Marton Bod)(#2715)
65d03fc is described below

commit 65d03fc3a1e40709645ea22a728c8a88468994d1
Author: pvary 
AuthorDate: Wed Oct 13 12:11:15 2021 +0200

HIVE-25610: Handle partition field comments for Iceberg tables (Peter Vary reviewed by Marton Bod)(#2715)
---
 .../apache/iceberg/mr/hive/HiveIcebergSerDe.java   | 78 --
 .../hive/TestHiveIcebergStorageHandlerNoScan.java  | 28 +++-
 .../org/apache/hadoop/hive/ql/plan/PlanUtils.java  |  2 +
 .../apache/hadoop/hive/serde/serdeConstants.java   |  4 ++
 .../apache/hadoop/hive/serde2/AbstractSerDe.java   | 45 -
 .../hive/metastore/utils/MetaStoreUtils.java   |  2 +-
 6 files changed, 104 insertions(+), 55 deletions(-)

diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java
index b260a2b..6bd4214 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java
@@ -19,10 +19,8 @@
 
 package org.apache.iceberg.mr.hive;
 
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -31,19 +29,16 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import javax.annotation.Nullable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.ColumnType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.session.SessionStateUtil;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeStats;
-import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.io.Writable;
 import org.apache.iceberg.PartitionField;
 import org.apache.iceberg.PartitionSpec;
@@ -60,6 +55,7 @@ import org.apache.iceberg.mr.InputFormatConfig;
 import org.apache.iceberg.mr.hive.serde.objectinspector.IcebergObjectInspector;
 import org.apache.iceberg.mr.mapred.Container;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -70,7 +66,6 @@ public class HiveIcebergSerDe extends AbstractSerDe {
   " queryable from Hive, since HMS does not know about it.";
 
   private static final Logger LOG = LoggerFactory.getLogger(HiveIcebergSerDe.class);
-  private static final String LIST_COLUMN_COMMENT = "columns.comments";
 
   private ObjectInspector inspector;
   private Schema tableSchema;
@@ -81,6 +76,8 @@ public class HiveIcebergSerDe extends AbstractSerDe {
   @Override
   public void initialize(@Nullable Configuration configuration, Properties serDeProperties,
                          Properties partitionProperties) throws SerDeException {
+    super.initialize(configuration, serDeProperties, partitionProperties);
+
     // HiveIcebergSerDe.initialize is called multiple places in Hive code:
     // - When we are trying to create a table - HiveDDL data is stored at the serDeProperties, but no Iceberg table
     // is created yet.
@@ -113,7 +110,7 @@ public class HiveIcebergSerDe extends AbstractSerDe {
     // provided in the CREATE TABLE query.
     boolean autoConversion = configuration.getBoolean(InputFormatConfig.SCHEMA_AUTO_CONVERSION, false);
     // If we can not load the table try the provided hive schema
-    this.tableSchema = hiveSchemaOrThrow(serDeProperties, e, autoConversion);
+    this.tableSchema = hiveSchemaOrThrow(e, autoConversion);
     // This is only for table creation, it is ok to have an empty partition column list
     this.partitionColumns = ImmutableList.of();
     // create table for CTAS
@@ -160,15 +157,10 @@ public class HiveIcebergSerDe extends AbstractSerDe {
     serDeProperties.setProperty(InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(tableSchema));
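
The behavioral core of this diff is that HiveIcebergSerDe.initialize(...) now calls super.initialize(...) first, so the parent SerDe parses shared table properties (such as column comments) once, and the subclass stops re-reading them from serDeProperties itself. A minimal sketch of that delegation pattern with hypothetical classes (BaseSerDe below is not Hive's AbstractSerDe):

import java.util.Properties;

class SerDeInitSketch {

  static class BaseSerDe {
    protected Properties properties;

    void initialize(Properties serDeProperties) {
      // The base class stores and parses properties shared by all SerDes.
      this.properties = serDeProperties;
    }

    String comment(String column) {
      return properties.getProperty("comment." + column, "");
    }
  }

  static class IcebergLikeSerDe extends BaseSerDe {
    @Override
    void initialize(Properties serDeProperties) {
      super.initialize(serDeProperties); // parent parses shared state first
      // Subclass-specific setup can now rely on inherited accessors
      // instead of reading serDeProperties directly.
    }
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("comment.city", "partition field comment");
    IcebergLikeSerDe serDe = new IcebergLikeSerDe();
    serDe.initialize(props);
    System.out.println(serDe.comment("city")); // prints the stored comment
  }
}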