[hive] branch master updated: HIVE-21225: ACID: getAcidState() should cache a recursive dir listing locally (Vaibhav Gumashta reviewed by Vineet Garg)

2019-07-23 Thread vgumashta
This is an automated email from the ASF dual-hosted git repository.

vgumashta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 959ebeb  HIVE-21225: ACID: getAcidState() should cache a recursive dir listing locally (Vaibhav Gumashta reviewed by Vineet Garg)
959ebeb is described below

commit 959ebeb680b07d59f4f55939862ebbc2d7f16a92
Author: Vaibhav Gumashta 
AuthorDate: Tue Jul 23 19:55:30 2019 -0700

HIVE-21225: ACID: getAcidState() should cache a recursive dir listing locally (Vaibhav Gumashta reviewed by Vineet Garg)
---
 .../hive/hcatalog/streaming/TestStreaming.java |  12 +-
 .../hcatalog/streaming/mutate/StreamingAssert.java |   2 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java| 486 ++---
 .../org/apache/hadoop/hive/ql/io/HdfsUtils.java|   3 +-
 .../apache/hadoop/hive/ql/io/HiveInputFormat.java  |   6 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java  |   4 +-
 .../hadoop/hive/ql/io/orc/OrcRawRecordMerger.java  |  10 +-
 .../ql/io/orc/VectorizedOrcAcidRowBatchReader.java |   5 +-
 .../hadoop/hive/ql/session/SessionState.java   |   2 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java  |   4 +-
 .../hadoop/hive/ql/txn/compactor/CompactorMR.java  |  17 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java|   3 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java|   1 +
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java| 111 ++---
 .../hive/ql/io/orc/TestInputOutputFormat.java  |  50 ++-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java |   5 +-
 .../org/apache/hive/streaming/TestStreaming.java   |  10 +-
 17 files changed, 550 insertions(+), 181 deletions(-)

diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 4dc04f4..bc67d03 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -564,7 +564,7 @@ public class TestStreaming {
   private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int buckets, int numExpectedFiles,
     String... records) throws Exception {
 ValidWriteIdList writeIds = getTransactionContext(conf);
-    AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds);
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, partitionPath, conf, writeIds, null, false, null, false);
 Assert.assertEquals(0, dir.getObsolete().size());
 Assert.assertEquals(0, dir.getOriginalFiles().size());
     List<AcidUtils.ParsedDelta> current = dir.getCurrentDirectories();
@@ -617,7 +617,8 @@ public class TestStreaming {
*/
   private void checkDataWritten2(Path partitionPath, long minTxn, long maxTxn, int numExpectedFiles,
     String validationQuery, boolean vectorize, String... records) throws Exception {
-    AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, getTransactionContext(conf));
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, partitionPath, conf, getTransactionContext(conf), null,
+        false, null, false);
 Assert.assertEquals(0, dir.getObsolete().size());
 Assert.assertEquals(0, dir.getOriginalFiles().size());
     List<AcidUtils.ParsedDelta> current = dir.getCurrentDirectories();
@@ -667,7 +668,8 @@ public class TestStreaming {
 return TxnCommonUtils.createValidReaderWriteIdList(v.get(0));
   }
   private void checkNothingWritten(Path partitionPath) throws Exception {
-    AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, getTransactionContext(conf));
+    AcidUtils.Directory dir = AcidUtils.getAcidState(null, partitionPath, conf, getTransactionContext(conf), null,
+        false, null, false);
 Assert.assertEquals(0, dir.getObsolete().size());
 Assert.assertEquals(0, dir.getOriginalFiles().size());
     List<AcidUtils.ParsedDelta> current = dir.getCurrentDirectories();
@@ -1250,7 +1252,7 @@ public class TestStreaming {
    /*now both batches have committed (but not closed) so for each primary file we expect a side file to exist and indicate the true length of the primary file*/
 FileSystem fs = partLoc.getFileSystem(conf);
-    AcidUtils.Directory dir = AcidUtils.getAcidState(partLoc, conf, getTransactionContext(conf));
+    AcidUtils.Directory dir = AcidUtils.getAcidState(fs, partLoc, conf, getTransactionContext(conf), null, false, null, false);
 for(AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) {
      for(FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) {
 Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath());
@@ -1275,7 +1277,7 @@ public class TestStreaming {
    //so each of 2 deltas has 1 bucket0 and 1 bucket0_flush_length.  Furthermore, each
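
A minimal sketch of the caching idea, in Java, under stated assumptions: DirSnapshot and loadSnapshot are illustrative names, not Hive's actual API. The patch threads the FileSystem and extra flags through getAcidState() (visible in the new eight-argument calls above) so that one recursive listing can answer every base/delta lookup from a local snapshot instead of issuing a listStatus() RPC per directory.

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.LocatedFileStatus;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;

  final class DirSnapshot {
    // One recursive listing call; on HDFS this batches RPCs instead of
    // issuing a separate listStatus() per base/delta directory.
    static List<FileStatus> loadSnapshot(FileSystem fs, Path root) throws IOException {
      List<FileStatus> all = new ArrayList<>();
      RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
      while (it.hasNext()) {
        all.add(it.next());
      }
      return all; // callers filter the snapshot in memory (e.g. by delta prefix)
    }
  }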

[hive] branch master updated: HIVE-21061: CTAS query fails with IllegalStateException for empty source (Riju Trivedi reviewed by Vaibhav Gumashta)

2019-04-30 Thread vgumashta
This is an automated email from the ASF dual-hosted git repository.

vgumashta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 0f8119f  HIVE-21061: CTAS query fails with IllegalStateException for empty source (Riju Trivedi reviewed by Vaibhav Gumashta)
0f8119f is described below

commit 0f8119fe797c5b596d22ec4eaaef8aeeb501ccae
Author: Vaibhav Gumashta 
AuthorDate: Mon Apr 29 22:45:13 2019 -0700

HIVE-21061: CTAS query fails with IllegalStateException for empty source (Riju Trivedi reviewed by Vaibhav Gumashta)
---
 .../test/resources/testconfiguration.properties|  4 +++-
 .../hive/ql/optimizer/physical/Vectorizer.java |  2 ++
 .../queries/clientpositive/ctas_empty_source.q |  5 
 .../clientpositive/llap/ctas_empty_source.q.out| 27 ++
 4 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 561d1f2..3229122 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -965,7 +965,9 @@ minillaplocal.query.files=\
   stats_date.q,\
   dst.q,\
   q93_with_constraints.q,\
-  approx_distinct.q
+  approx_distinct.q,\
+  ctas_empty_source.q
+  
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_insert_partition_static.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index c623adf..52e8dcb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -98,6 +98,7 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggreg
 import org.apache.hadoop.hive.ql.io.NullRowsInputFormat;
 import org.apache.hadoop.hive.ql.io.OneNullRowInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.hive.ql.io.ZeroRowsInputFormat;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.Node;
@@ -347,6 +348,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     // For metadataonly or empty rows optimizations, null/onerow input format can be selected.
 supportedAcidInputFormats.add(NullRowsInputFormat.class.getName());
 supportedAcidInputFormats.add(OneNullRowInputFormat.class.getName());
+supportedAcidInputFormats.add(ZeroRowsInputFormat.class.getName());
   }
 
   private boolean isTestVectorizationSuppressExplainExecutionMode;
diff --git a/ql/src/test/queries/clientpositive/ctas_empty_source.q b/ql/src/test/queries/clientpositive/ctas_empty_source.q
new file mode 100644
index 0000000..e587947
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/ctas_empty_source.q
@@ -0,0 +1,5 @@
+
+drop table if exists testctas1;
+drop table if exists testctas2;
+create table testctas1 (id int);
+create table testctas2 as select * from testctas1 where 1=2;
diff --git a/ql/src/test/results/clientpositive/llap/ctas_empty_source.q.out b/ql/src/test/results/clientpositive/llap/ctas_empty_source.q.out
new file mode 100644
index 0000000..0285f1d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ctas_empty_source.q.out
@@ -0,0 +1,27 @@
+PREHOOK: query: drop table if exists testctas1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists testctas1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists testctas2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists testctas2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table testctas1 (id int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testctas1
+POSTHOOK: query: create table testctas1 (id int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testctas1
+PREHOOK: query: create table testctas2 as select * from testctas1 where 1=2
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@testctas1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testctas2
+POSTHOOK: query: create table testctas2 as select * from testctas1 where 1=2
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@testctas1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testctas2
+POSTHOOK: Lineage: testctas2.id SIMPLE [(testctas1)testctas1.FieldSchema(name:id, type:int, comment:null), ]



[hive] branch master updated: HIVE-20699: Query based compactor for full CRUD Acid tables (Vaibhav Gumashta reviewed by Eugene Koifman)

2019-02-04 Thread vgumashta
This is an automated email from the ASF dual-hosted git repository.

vgumashta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 313e49f  HIVE-20699: Query based compactor for full CRUD Acid tables (Vaibhav Gumashta reviewed by Eugene Koifman)
313e49f is described below

commit 313e49f6b706555a16288fab50c79b7aedf7ba77
Author: Vaibhav Gumashta 
AuthorDate: Mon Feb 4 17:42:02 2019 -0800

HIVE-20699: Query based compactor for full CRUD Acid tables (Vaibhav Gumashta reviewed by Eugene Koifman)
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |   7 +
 .../org/apache/hadoop/hive/ql/TestAcidOnTez.java   |  54 ++-
 .../ql/txn/compactor/TestCrudCompactorOnTez.java   | 429 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java  |   1 +
 .../hive/ql/exec/tez/HiveSplitGenerator.java   |   2 +-
 .../hadoop/hive/ql/exec/tez/SplitGrouper.java  | 164 +++-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java|   8 +-
 .../hadoop/hive/ql/io/orc/OrcRawRecordMerger.java  |   3 -
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java|   2 +-
 .../org/apache/hadoop/hive/ql/io/orc/OrcSplit.java |  38 +-
 .../hadoop/hive/ql/parse/DDLSemanticAnalyzer.java  |   6 +
 .../hadoop/hive/ql/txn/compactor/CompactorMR.java  | 199 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java|   6 +
 .../generic/GenericUDFValidateAcidSortOrder.java   | 100 +
 .../results/clientpositive/show_functions.q.out|   2 +
 15 files changed, 987 insertions(+), 34 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 414070e..a3b03ca 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2705,6 +2705,13 @@ public class HiveConf extends Configuration {
 
     HIVE_COMPACTOR_COMPACT_MM("hive.compactor.compact.insert.only", true,
         "Whether the compactor should compact insert-only tables. A safety switch."),
+    COMPACTOR_CRUD_QUERY_BASED("hive.compactor.crud.query.based", false,
+        "When enabled, major compaction on full CRUD tables is done as a query, "
+        + "and minor compaction will be disabled."),
+    SPLIT_GROUPING_MODE("hive.split.grouping.mode", "query", new StringSet("query", "compactor"),
+        "This is set to compactor from within the query based compactor. This enables the Tez SplitGrouper "
+        + "to group splits based on their bucket number, so that all rows from different bucket files "
+        + "for the same bucket number can end up in the same bucket file after the compaction."),
 /**
  * @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_SUCCEEDED
  */
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index d6a4191..142c2d2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -121,10 +121,15 @@ public class TestAcidOnTez {
 SessionState.start(new SessionState(hiveConf));
 d = DriverFactory.newDriver(hiveConf);
 dropTables();
-    runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc " + getTblProperties());
-    runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc " + getTblProperties());
-    runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc ");
-    runStatementOnDriver("create table " + Table.NONACIDPART + "(a int, b int) partitioned by (p string) stored as orc ");
+    runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT
+        + " buckets stored as orc " + getTblProperties());
+    runStatementOnDriver("create table " + Table.ACIDTBLPART
+        + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc "
+        + getTblProperties());
+    runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT
+        + " buckets stored as orc ");
+    runStatementOnDriver("create table " + Table.NONACIDPART
+
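
A minimal usage sketch under stated assumptions: hiveConf, runStatementOnDriver and Table.ACIDTBL are the TestAcidOnTez helpers visible in the diff above, borrowed here for illustration. With the new flag on, the requested major compaction is executed by the compactor as a rewrite query rather than an MR job.

  // Hedged sketch; helper names are borrowed from the TestAcidOnTez diff above.
  hiveConf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true);
  runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'major'");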

hive git commit: HIVE-21065: Upgrade Hive to use ORC 1.5.4 (Igor Kryvenko reviewed by Eugene Koifman)

2019-01-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 926c1e8e3 -> dc215b187


HIVE-21065: Upgrade Hive to use ORC 1.5.4 (Igor Kryvenko reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc215b18
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc215b18
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc215b18

Branch: refs/heads/master
Commit: dc215b1870dc77e5e9088e06abea42eb47a16c1c
Parents: 926c1e8
Author: Vaibhav Gumashta 
Authored: Wed Jan 2 11:28:15 2019 -0800
Committer: Vaibhav Gumashta 
Committed: Wed Jan 2 11:28:15 2019 -0800

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dc215b18/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 240472a..9871bae 100644
--- a/pom.xml
+++ b/pom.xml
@@ -186,7 +186,7 @@
 0.9.3
 2.10.0
 2.3
-    <orc.version>1.5.3</orc.version>
+    <orc.version>1.5.4</orc.version>
 1.10.19
 1.7.4
 2.0.0-M5



hive git commit: HIVE-20430: CachedStore: bug fixes for TestEmbeddedHiveMetaStore, TestRemoteHiveMetaStore, TestMiniLlapCliDriver, TestMiniTezCliDriver, TestMinimrCliDriver (Vaibhav Gumashta reviewed by Daniel Dai)

2018-09-25 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 6137ee5dd -> a81f53ac1


HIVE-20430: CachedStore: bug fixes for TestEmbeddedHiveMetaStore, TestRemoteHiveMetaStore, TestMiniLlapCliDriver, TestMiniTezCliDriver, TestMinimrCliDriver (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a81f53ac
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a81f53ac
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a81f53ac

Branch: refs/heads/master
Commit: a81f53ac17f4a5f0fd68fbdabe7a038b3612fd80
Parents: 6137ee5
Author: Vaibhav Gumashta 
Authored: Tue Sep 25 12:04:39 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Sep 25 12:11:01 2018 -0700

--
 .../hadoop/hive/ql/exec/TestOperators.java  |   1 +
 .../hive/metastore/cache/CachedStore.java   | 130 +++
 .../hive/metastore/cache/SharedCache.java   |   8 +-
 3 files changed, 81 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a81f53ac/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index abf7198..c7cd4ad 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -394,6 +394,7 @@ public class TestOperators extends TestCase {
   // ensure that both of the partitions are in the complete list.
   String[] dirs = job.get("hive.complete.dir.list").split("\t");
   assertEquals(2, dirs.length);
+  Arrays.sort(dirs);
   assertEquals(true, dirs[0].endsWith("/state=CA"));
   assertEquals(true, dirs[1].endsWith("/state=OR"));
   return super.getSplits(job, splits);

http://git-wip-us.apache.org/repos/asf/hive/blob/a81f53ac/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0445cbf..b9a5458 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -853,9 +853,7 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public Table getTable(String catName, String dbName, String tblName,
-    String validWriteIds)
-  throws MetaException {
+  public Table getTable(String catName, String dbName, String tblName, String validWriteIds) throws MetaException {
 catName = normalizeIdentifier(catName);
 dbName = StringUtils.normalizeIdentifier(dbName);
 tblName = StringUtils.normalizeIdentifier(tblName);
@@ -872,12 +870,28 @@ public class CachedStore implements RawStore, Configurable {
   return rawStore.getTable(catName, dbName, tblName, validWriteIds);
 }
 if (validWriteIds != null) {
-      tbl.setParameters(adjustStatsParamsForGet(tbl.getParameters(),
-          tbl.getParameters(), tbl.getWriteId(), validWriteIds));
+      tbl.setParameters(
+          adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds));
 }
 
 tbl.unsetPrivileges();
 tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+if (tbl.getPartitionKeys() == null) {
+      // a getTable call from ObjectStore returns an empty list, so normalize null partition keys to match
+  tbl.setPartitionKeys(new ArrayList<>());
+}
+String tableType = tbl.getTableType();
+if (tableType == null) {
+  // for backwards compatibility with old metastore persistence
+  if (tbl.getViewOriginalText() != null) {
+tableType = TableType.VIRTUAL_VIEW.toString();
+  } else if ("TRUE".equals(tbl.getParameters().get("EXTERNAL"))) {
+tableType = TableType.EXTERNAL_TABLE.toString();
+  } else {
+tableType = TableType.MANAGED_TABLE.toString();
+  }
+}
+tbl.setTableType(tableType);
 return tbl;
   }
 
@@ -1133,6 +1147,10 @@ public class CachedStore implements RawStore, Configurable {
 if (!isCachePrewarmed.get() || missSomeInCache) {
   return rawStore.getTableObjectsByName(catName, dbName, tblNames);
 }
+Database db = sharedCache.getDatabaseFromCache(catName, dbName);
+if (db == null) {
+  throw new UnknownDBException("Could not find database " + dbName);
+}
    List<Table> tables 

hive git commit: HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)

2018-09-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 ca5e241c2 -> d8c97cf28


HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d8c97cf2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d8c97cf2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d8c97cf2

Branch: refs/heads/branch-3
Commit: d8c97cf2804e4f48c0ae8ae3df64d1c9e10cb28d
Parents: ca5e241
Author: Vaibhav Gumashta 
Authored: Fri Sep 21 16:27:15 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Sep 21 16:33:06 2018 -0700

--
 .../org/apache/hive/jdbc/HiveConnection.java| 19 +---
 .../jdbc/HttpKerberosRequestInterceptor.java| 23 +--
 .../apache/hive/service/auth/HttpAuthUtils.java | 24 +---
 3 files changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d8c97cf2/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 14939cb..a4920bf 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -70,6 +70,7 @@ import org.slf4j.LoggerFactory;
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
+import javax.security.auth.Subject;
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import java.io.BufferedReader;
@@ -81,6 +82,8 @@ import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.security.AccessControlContext;
+import java.security.AccessController;
 import java.security.KeyStore;
 import java.security.SecureRandom;
 import java.sql.Array;
@@ -140,6 +143,7 @@ public class HiveConnection implements java.sql.Connection {
   private String initFile = null;
   private String wmPool = null, wmApp = null;
   private Properties clientInfo;
+  private Subject loggedInSubject;
 
   /**
* Get all direct HiveServer2 URLs from a ZooKeeper based HiveServer2 URL
@@ -405,15 +409,24 @@ public class HiveConnection implements java.sql.Connection {
 }
 // Configure http client for kerberos/password based authentication
 if (isKerberosAuthMode()) {
+  if (assumeSubject) {
+// With this option, we're assuming that the external application,
+// using the JDBC driver has done a JAAS kerberos login already
+AccessControlContext context = AccessController.getContext();
+loggedInSubject = Subject.getSubject(context);
+if (loggedInSubject == null) {
+  throw new SQLException("The Subject is not set");
+}
+  }
   /**
* Add an interceptor which sets the appropriate header in the request.
* It does the kerberos authentication and gets the final service ticket,
* for sending to the server before every request.
* In https mode, the entire information is encrypted
*/
-      requestInterceptor = new HttpKerberosRequestInterceptor(
-          sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, getServerHttpUrl(useSsl),
-          assumeSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
+      requestInterceptor = new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL),
+          host, getServerHttpUrl(useSsl), loggedInSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders,
+          customCookies);
 } else {
   // Check for delegation token, if present add it in the header
   String tokenStr = getClientDelegationToken(sessConfMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/d8c97cf2/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
index 28d42d7..516825f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
@@ -21,6 +21,8 @@ package org.apache.hive.jdbc;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
+import javax.security.auth.Subject;
+
 import org.apache.hive.service.auth.HttpAuthUtils;
 import 

hive git commit: HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)

2018-09-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3.1 bcc7df958 -> 3560db30c


HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3560db30
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3560db30
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3560db30

Branch: refs/heads/branch-3.1
Commit: 3560db30c461215ecedf19fd7f6e38fcbc85cec3
Parents: bcc7df9
Author: Vaibhav Gumashta 
Authored: Fri Sep 21 16:27:15 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Sep 21 16:33:52 2018 -0700

--
 .../org/apache/hive/jdbc/HiveConnection.java| 19 +---
 .../jdbc/HttpKerberosRequestInterceptor.java| 23 +--
 .../apache/hive/service/auth/HttpAuthUtils.java | 24 +---
 3 files changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3560db30/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 458158e..a654b05 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -70,6 +70,7 @@ import org.slf4j.LoggerFactory;
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
+import javax.security.auth.Subject;
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import java.io.BufferedReader;
@@ -81,6 +82,8 @@ import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.security.AccessControlContext;
+import java.security.AccessController;
 import java.security.KeyStore;
 import java.security.SecureRandom;
 import java.sql.Array;
@@ -140,6 +143,7 @@ public class HiveConnection implements java.sql.Connection {
   private String initFile = null;
   private String wmPool = null, wmApp = null;
   private Properties clientInfo;
+  private Subject loggedInSubject;
 
   /**
* Get all direct HiveServer2 URLs from a ZooKeeper based HiveServer2 URL
@@ -397,15 +401,24 @@ public class HiveConnection implements java.sql.Connection {
 }
 // Configure http client for kerberos/password based authentication
 if (isKerberosAuthMode()) {
+  if (assumeSubject) {
+// With this option, we're assuming that the external application,
+// using the JDBC driver has done a JAAS kerberos login already
+AccessControlContext context = AccessController.getContext();
+loggedInSubject = Subject.getSubject(context);
+if (loggedInSubject == null) {
+  throw new SQLException("The Subject is not set");
+}
+  }
   /**
* Add an interceptor which sets the appropriate header in the request.
* It does the kerberos authentication and gets the final service ticket,
* for sending to the server before every request.
* In https mode, the entire information is encrypted
*/
-      requestInterceptor = new HttpKerberosRequestInterceptor(
-          sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, getServerHttpUrl(useSsl),
-          assumeSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
+      requestInterceptor = new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL),
+          host, getServerHttpUrl(useSsl), loggedInSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders,
+          customCookies);
 } else {
   // Check for delegation token, if present add it in the header
   String tokenStr = getClientDelegationToken(sessConfMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/3560db30/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
index 28d42d7..516825f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
@@ -21,6 +21,8 @@ package org.apache.hive.jdbc;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
+import javax.security.auth.Subject;
+
 import org.apache.hive.service.auth.HttpAuthUtils;
 import 

hive git commit: HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)

2018-09-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master cfdb433bc -> cdba00c96


HIVE-20555: HiveServer2: Preauthenticated subject for http transport is not retained for entire duration of http communication in some cases (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cdba00c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cdba00c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cdba00c9

Branch: refs/heads/master
Commit: cdba00c96fd86c4f9c28dbaa411727f1666d26cb
Parents: cfdb433
Author: Vaibhav Gumashta 
Authored: Fri Sep 21 16:27:15 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Sep 21 16:32:35 2018 -0700

--
 .../org/apache/hive/jdbc/HiveConnection.java| 19 +---
 .../jdbc/HttpKerberosRequestInterceptor.java| 23 +--
 .../apache/hive/service/auth/HttpAuthUtils.java | 24 +---
 3 files changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/cdba00c9/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 335995c..8d5aa70 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -70,6 +70,7 @@ import org.slf4j.LoggerFactory;
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
+import javax.security.auth.Subject;
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import java.io.BufferedReader;
@@ -81,6 +82,8 @@ import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.security.AccessControlContext;
+import java.security.AccessController;
 import java.security.KeyStore;
 import java.security.SecureRandom;
 import java.sql.Array;
@@ -140,6 +143,7 @@ public class HiveConnection implements java.sql.Connection {
   private String initFile = null;
   private String wmPool = null, wmApp = null;
   private Properties clientInfo;
+  private Subject loggedInSubject;
 
   /**
* Get all direct HiveServer2 URLs from a ZooKeeper based HiveServer2 URL
@@ -478,15 +482,24 @@ public class HiveConnection implements java.sql.Connection {
 }
 // Configure http client for kerberos/password based authentication
 if (isKerberosAuthMode()) {
+  if (assumeSubject) {
+// With this option, we're assuming that the external application,
+// using the JDBC driver has done a JAAS kerberos login already
+AccessControlContext context = AccessController.getContext();
+loggedInSubject = Subject.getSubject(context);
+if (loggedInSubject == null) {
+  throw new SQLException("The Subject is not set");
+}
+  }
   /**
* Add an interceptor which sets the appropriate header in the request.
* It does the kerberos authentication and gets the final service ticket,
* for sending to the server before every request.
* In https mode, the entire information is encrypted
*/
-      requestInterceptor = new HttpKerberosRequestInterceptor(
-          sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, getServerHttpUrl(useSsl),
-          assumeSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
+      requestInterceptor = new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL),
+          host, getServerHttpUrl(useSsl), loggedInSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders,
+          customCookies);
 } else {
   // Check for delegation token, if present add it in the header
   String tokenStr = getClientDelegationToken(sessConfMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/cdba00c9/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
index 28d42d7..516825f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HttpKerberosRequestInterceptor.java
@@ -21,6 +21,8 @@ package org.apache.hive.jdbc;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantLock;
 
+import javax.security.auth.Subject;
+
 import org.apache.hive.service.auth.HttpAuthUtils;
 import 

hive git commit: HIVE-20507: Beeline: Add a utility command to retrieve all uris from beeline-site.xml (Vaibhav Gumashta, reviewed by Daniel Dai)

2018-09-20 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 208e202f5 -> 3635c4dba


HIVE-20507: Beeline: Add a utility command to retrieve all uris from beeline-site.xml (Vaibhav Gumashta, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3635c4db
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3635c4db
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3635c4db

Branch: refs/heads/master
Commit: 3635c4dba1613f0aef697b652dbe9e45bb64cb33
Parents: 208e202f
Author: Vaibhav Gumashta 
Authored: Thu Sep 20 15:23:12 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Thu Sep 20 15:23:12 2018 -0700

--
 .../java/org/apache/hive/beeline/BeeLine.java   | 57 ++-
 .../org/apache/hive/beeline/BeeLineOpts.java| 10 +++
 beeline/src/main/resources/BeeLine.properties   |  1 +
 .../org/apache/hive/jdbc/HiveConnection.java| 73 
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |  2 +-
 5 files changed, 141 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3635c4db/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 29ec2de..e54e818 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -65,6 +65,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.ResourceBundle;
 import java.util.ServiceLoader;
@@ -94,6 +95,7 @@ import org.apache.hive.beeline.hs2connection.HS2ConnectionFileUtils;
 import org.apache.hive.beeline.hs2connection.HiveSiteHS2ConnectionFileParser;
 import org.apache.hive.beeline.hs2connection.UserHS2ConnectionFileParser;
 import org.apache.hive.common.util.ShutdownHookManager;
+import org.apache.hive.jdbc.HiveConnection;
 import org.apache.hive.jdbc.JdbcUriParseException;
 import org.apache.hive.jdbc.Utils;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
@@ -389,6 +391,12 @@ public class BeeLine implements Closeable {
 .withLongOpt("help")
 .withDescription("Display this message")
 .create('h'));
+
+// -getUrlsFromBeelineSite
+options.addOption(OptionBuilder
+.withLongOpt("getUrlsFromBeelineSite")
+.withDescription("Print all urls from beeline-site.xml, if it is 
present in the classpath")
+.create());
 
 // Substitution option --hivevar
 options.addOption(OptionBuilder
@@ -712,7 +720,7 @@ public class BeeLine implements Closeable {
 
 private boolean isBeeLineOpt(String arg) {
      return arg.startsWith("--") && !(HIVE_VAR_PREFIX.equals(arg) || (HIVE_CONF_PREFIX.equals(arg))
-          || "--help".equals(arg) || PROP_FILE_PREFIX.equals(arg));
+          || "--help".equals(arg) || PROP_FILE_PREFIX.equals(arg) || "--getUrlsFromBeelineSite".equals(arg));
 }
   }
 
@@ -846,6 +854,12 @@ public class BeeLine implements Closeable {
   getOpts().setHelpAsked(true);
   return true;
 }
+
+if (cl.hasOption("getUrlsFromBeelineSite")) {
+  printBeelineSiteUrls();
+  getOpts().setBeelineSiteUrlsAsked(true);
+  return true;
+}
 
 Properties hiveVars = cl.getOptionProperties("hivevar");
 for (String key : hiveVars.stringPropertyNames()) {
@@ -922,6 +936,44 @@ public class BeeLine implements Closeable {
 return false;
   }
 
+  private void printBeelineSiteUrls() {
+BeelineSiteParser beelineSiteParser = getUserBeelineSiteParser();
+if (!beelineSiteParser.configExists()) {
+  output("No beeline-site.xml in the path", true);
+}
+if (beelineSiteParser.configExists()) {
+  // Get the named url from user specific config file if present
+  try {
+        Properties userNamedConnectionURLs = beelineSiteParser.getConnectionProperties();
+        userNamedConnectionURLs.remove(BeelineSiteParser.DEFAULT_NAMED_JDBC_URL_PROPERTY_KEY);
+        StringBuilder sb = new StringBuilder("urls: ");
+        for (Entry<Object, Object> entry : userNamedConnectionURLs.entrySet()) {
+  String urlFromBeelineSite = (String) entry.getValue();
+  if (isZkBasedUrl(urlFromBeelineSite)) {
+            List<String> jdbcUrls = HiveConnection.getAllUrlStrings(urlFromBeelineSite);
+for (String jdbcUrl : jdbcUrls) {
+  sb.append(jdbcUrl + ", ");
+}
+  } else {
+sb.append(urlFromBeelineSite + ", ");
+  }
+}
+output(sb.toString(), true);
+  } catch (Exception e) {
+output(e.getMessage(), true);
+
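
In practice, with a beeline-site.xml on the classpath, running "beeline --getUrlsFromBeelineSite" prints the named JDBC URLs it defines; per the printBeelineSiteUrls() logic above, ZooKeeper-based entries are expanded into the individual HiveServer2 instance URLs via HiveConnection.getAllUrlStrings().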

hive git commit: HIVE-20577: Disable org.apache.hive.jdbc.miniHS2.TestHs2ConnectionMetricsHttp.testOpenConnectionMetrics (Vaibhav Gumashta reviewed by Daniel Dai)

2018-09-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master a5dc0c099 -> 393b382af


HIVE-20577: Disable org.apache.hive.jdbc.miniHS2.TestHs2ConnectionMetricsHttp.testOpenConnectionMetrics (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/393b382a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/393b382a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/393b382a

Branch: refs/heads/master
Commit: 393b382af00013c72c074c79d71b261c3c9e3bd3
Parents: a5dc0c0
Author: Vaibhav Gumashta 
Authored: Mon Sep 17 10:48:24 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Sep 17 10:48:24 2018 -0700

--
 .../org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/393b382a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java
index 1e29363..65889aa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2ConnectionMetricsHttp.java
@@ -38,6 +38,7 @@ import org.apache.thrift.transport.TTransport;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -58,6 +59,7 @@ public class TestHs2ConnectionMetricsHttp extends Hs2ConnectionMetrics {
 Hs2ConnectionMetrics.tearDown();
   }
 
+  @Ignore("Flaky test. Should be re-enabled in HIVE-20578")
   @Test
   public void testOpenConnectionMetrics() throws Exception {
 CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();



hive git commit: HIVE-20337: CachedStore: getPartitionsByExpr is not populating the partition list correctly (Vaibhav Gumashta, reviewed by Daniel Dai)

2018-08-08 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 4d18b792c -> 18b5c8b8d


HIVE-20337: CachedStore: getPartitionsByExpr is not populating the partition list correctly (Vaibhav Gumashta, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/18b5c8b8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/18b5c8b8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/18b5c8b8

Branch: refs/heads/branch-3
Commit: 18b5c8b8d5edb2ba45775430484a08cfe52c5da3
Parents: 4d18b79
Author: Vaibhav Gumashta 
Authored: Wed Aug 8 18:55:32 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Aug 8 19:01:30 2018 -0700

--
 .../hadoop/hive/metastore/cache/CachedStore.java | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/18b5c8b8/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 54c833d..2b03d87 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -1247,18 +1247,21 @@ public class CachedStore implements RawStore, Configurable {
 dbName = StringUtils.normalizeIdentifier(dbName);
 tblName = StringUtils.normalizeIdentifier(tblName);
 if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
-          result);
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
 }
     List<String> partNames = new LinkedList<>();
 Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 if (table == null) {
   // The table is not yet loaded in cache
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
-          result);
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
+}
+    boolean hasUnknownPartitions =
+        getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames, sharedCache);
+    for (String partName : partNames) {
+      Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+      part.unsetPrivileges();
+      result.add(part);
     }
-    boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr,
-        defaultPartitionName, maxParts, partNames, sharedCache);
 return hasUnknownPartitions;
   }
 



hive git commit: HIVE-20337: CachedStore: getPartitionsByExpr is not populating the partition list correctly (Vaibhav Gumashta, reviewed by Daniel Dai)

2018-08-08 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 3f3d9189f -> 3a649b6d0


HIVE-20337: CachedStore: getPartitionsByExpr is not populating the partition list correctly (Vaibhav Gumashta, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a649b6d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a649b6d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a649b6d

Branch: refs/heads/master
Commit: 3a649b6d048a002e1a79588bd329b21eb7df419a
Parents: 3f3d918
Author: Vaibhav Gumashta 
Authored: Wed Aug 8 18:55:32 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Aug 8 18:55:32 2018 -0700

--
 .../hadoop/hive/metastore/cache/CachedStore.java | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3a649b6d/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index f73047f..1d53244 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -1258,18 +1258,21 @@ public class CachedStore implements RawStore, Configurable {
 dbName = StringUtils.normalizeIdentifier(dbName);
 tblName = StringUtils.normalizeIdentifier(tblName);
 if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
-          result);
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
 }
     List<String> partNames = new LinkedList<>();
 Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 if (table == null) {
   // The table is not yet loaded in cache
-      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
-          result);
+      return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result);
+}
+    boolean hasUnknownPartitions =
+        getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames, sharedCache);
+    for (String partName : partNames) {
+      Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+      part.unsetPrivileges();
+      result.add(part);
     }
-    boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr,
-        defaultPartitionName, maxParts, partNames, sharedCache);
 return hasUnknownPartitions;
   }
 



hive git commit: HIVE-19389: Schematool: For Hive's Information Schema, use embedded HS2 as default (Vaibhav Gumashta reviewed by Daniel Dai)

2018-06-07 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 566a48db4 -> 7899face4


HIVE-19389: Schematool: For Hive's Information Schema, use embedded HS2 as default (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7899face
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7899face
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7899face

Branch: refs/heads/branch-3
Commit: 7899face415f66cff0c44eab1ac908aa255ff501
Parents: 566a48d
Author: Vaibhav Gumashta 
Authored: Wed May 9 12:23:10 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Thu Jun 7 11:09:52 2018 -0700

--
 beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java  | 7 +++
 .../apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java  | 3 +++
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7899face/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 314dff8..262eaa2 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -108,6 +108,13 @@ public class HiveSchemaTool {
 this.needsQuotedIdentifier = parser.needsQuotedIdentifier();
 this.quoteCharacter = parser.getQuoteCharacter();
     this.metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, hiveHome, dbType);
+    // If the dbType is "hive", this is setting up the information schema in Hive.
+    // We will set the default jdbc url and driver.
+    // It is overridden by command line options if passed (-url and -driver).
+    if (dbType.equalsIgnoreCase(HiveSchemaHelper.DB_HIVE)) {
+      url = HiveSchemaHelper.EMBEDDED_HS2_URL;
+      driver = HiveSchemaHelper.HIVE_JDBC_DRIVER;
+    }
   }
 
   public HiveConf getHiveConf() {

http://git-wip-us.apache.org/repos/asf/hive/blob/7899face/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
--
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
index 785978b..70746e8 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
@@ -44,6 +44,9 @@ public class HiveSchemaHelper {
   public static final String DB_MYSQL = "mysql";
   public static final String DB_POSTGRACE = "postgres";
   public static final String DB_ORACLE = "oracle";
+  public static final String EMBEDDED_HS2_URL = "jdbc:hive2://";
+  public static final String HIVE_JDBC_DRIVER = "org.apache.hive.jdbc.HiveDriver";
+
 
   /***
* Get JDBC connection to metastore db



hive git commit: HIVE-19728: beeline with USE_BEELINE_FOR_HIVE_CLI fails when trying to set hive.aux.jars.path (Daniel Voros reviewed by Vaibhav Gumashta)

2018-06-01 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 17d661e5d -> 48d1a6a5e


HIVE-19728: beeline with USE_BEELINE_FOR_HIVE_CLI fails when trying to set hive.aux.jars.path (Daniel Voros reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/48d1a6a5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/48d1a6a5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/48d1a6a5

Branch: refs/heads/master
Commit: 48d1a6a5e47085cdc3a3d71649ddd28af7566c88
Parents: 17d661e
Author: Vaibhav Gumashta 
Authored: Fri Jun 1 10:49:48 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Jun 1 10:50:36 2018 -0700

--
 bin/ext/cli.sh | 22 +++---
 bin/hive   |  4 
 2 files changed, 7 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/48d1a6a5/bin/ext/cli.sh
--
diff --git a/bin/ext/cli.sh b/bin/ext/cli.sh
index 14337c1..c837508 100644
--- a/bin/ext/cli.sh
+++ b/bin/ext/cli.sh
@@ -24,25 +24,9 @@ fi
 
 updateCli() {
   if [ "$USE_DEPRECATED_CLI" == "true" ]; then
-if [ "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]; then
- CLASS=org.apache.hive.beeline.BeeLine;
-  # include only the beeline client jar and its dependencies
-  beelineJarPath=`ls ${HIVE_LIB}/hive-beeline-*.jar`
-  superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar`
-  jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar`
-  hadoopClasspath=""
-  if [[ -n "${HADOOP_CLASSPATH}" ]]
-  then
-hadoopClasspath="${HADOOP_CLASSPATH}:"
-  fi
-  export HADOOP_CLASSPATH="${hadoopClasspath}${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}"
-  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=beeline-log4j2.properties "
-  exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
-else
-  export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
-  CLASS=org.apache.hadoop.hive.cli.CliDriver
-  JAR=hive-cli-*.jar
-fi
+export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
+CLASS=org.apache.hadoop.hive.cli.CliDriver
+JAR=hive-cli-*.jar
   else
 export HADOOP_CLIENT_OPTS=" -Dproc_beeline $HADOOP_CLIENT_OPTS 
-Dlog4j.configurationFile=beeline-log4j2.properties"
 CLASS=org.apache.hive.beeline.cli.HiveCli

http://git-wip-us.apache.org/repos/asf/hive/blob/48d1a6a5/bin/hive
--
diff --git a/bin/hive b/bin/hive
index 87be599..c54c6fb 100755
--- a/bin/hive
+++ b/bin/hive
@@ -85,6 +85,10 @@ if [ "$SERVICE" = "" ] ; then
   fi
 fi
 
+if [[ "$SERVICE" == "cli" && "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]] ; then
+  SERVICE="beeline"
+fi
+
 if [[ "$SERVICE" =~ 
^(help|version|orcfiledump|rcfilecat|schemaTool|cleardanglingscratchdir|metastore|beeline|llapstatus|llap)$
 ]] ; then
   SKIP_HBASECP=true
 fi
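
A usage sketch against the environment variables these scripts already read: exporting USE_BEELINE_FOR_HIVE_CLI=true before running bin/hive now re-routes the cli service to the full beeline service, instead of assembling the stripped-down Beeline classpath that previously broke settings such as hive.aux.jars.path.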



hive git commit: HIVE-19728: beeline with USE_BEELINE_FOR_HIVE_CLI fails when trying to set hive.aux.jars.path (Daniel Voros reviewed by Vaibhav Gumashta)

2018-06-01 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 8350e61a0 -> 6f5d4dd87


HIVE-19728: beeline with USE_BEELINE_FOR_HIVE_CLI fails when trying to set hive.aux.jars.path (Daniel Voros reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6f5d4dd8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6f5d4dd8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6f5d4dd8

Branch: refs/heads/branch-3
Commit: 6f5d4dd87df7dc63d81a2c57a013f4f94fa60099
Parents: 8350e61
Author: Vaibhav Gumashta 
Authored: Fri Jun 1 10:49:48 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Jun 1 10:49:48 2018 -0700

--
 bin/ext/cli.sh | 22 +++---
 bin/hive   |  4 
 2 files changed, 7 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6f5d4dd8/bin/ext/cli.sh
--
diff --git a/bin/ext/cli.sh b/bin/ext/cli.sh
index 14337c1..c837508 100644
--- a/bin/ext/cli.sh
+++ b/bin/ext/cli.sh
@@ -24,25 +24,9 @@ fi
 
 updateCli() {
   if [ "$USE_DEPRECATED_CLI" == "true" ]; then
-if [ "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]; then
- CLASS=org.apache.hive.beeline.BeeLine;
-  # include only the beeline client jar and its dependencies
-  beelineJarPath=`ls ${HIVE_LIB}/hive-beeline-*.jar`
-  superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar`
-  jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar`
-  hadoopClasspath=""
-  if [[ -n "${HADOOP_CLASSPATH}" ]]
-  then
-hadoopClasspath="${HADOOP_CLASSPATH}:"
-  fi
-  export HADOOP_CLASSPATH="${hadoopClasspath}${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}"
-  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=beeline-log4j2.properties "
-  exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
-else
-  export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
-  CLASS=org.apache.hadoop.hive.cli.CliDriver
-  JAR=hive-cli-*.jar
-fi
+export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
+CLASS=org.apache.hadoop.hive.cli.CliDriver
+JAR=hive-cli-*.jar
   else
 export HADOOP_CLIENT_OPTS=" -Dproc_beeline $HADOOP_CLIENT_OPTS 
-Dlog4j.configurationFile=beeline-log4j2.properties"
 CLASS=org.apache.hive.beeline.cli.HiveCli

http://git-wip-us.apache.org/repos/asf/hive/blob/6f5d4dd8/bin/hive
--
diff --git a/bin/hive b/bin/hive
index 87be599..c54c6fb 100755
--- a/bin/hive
+++ b/bin/hive
@@ -85,6 +85,10 @@ if [ "$SERVICE" = "" ] ; then
   fi
 fi
 
+if [[ "$SERVICE" == "cli" && "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]] ; then
+  SERVICE="beeline"
+fi
+
 if [[ "$SERVICE" =~ 
^(help|version|orcfiledump|rcfilecat|schemaTool|cleardanglingscratchdir|metastore|beeline|llapstatus|llap)$
 ]] ; then
   SKIP_HBASECP=true
 fi



hive git commit: HIVE-19389: Schematool: For Hive's Information Schema, use embedded HS2 as default (Vaibhav Gumashta reviewed by Daniel Dai)

2018-05-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 72eff127a -> 8ac625744


HIVE-19389: Schematool: For Hive's Information Schema, use embedded HS2 as default (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8ac62574
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8ac62574
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8ac62574

Branch: refs/heads/master
Commit: 8ac625744109fde23e105fde3e02f5da894da8d4
Parents: 72eff12
Author: Vaibhav Gumashta 
Authored: Wed May 9 12:23:10 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed May 9 12:23:10 2018 -0700

--
 beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java  | 7 +++
 .../apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java  | 3 +++
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8ac62574/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index a469cd4..7aad265 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -110,6 +110,13 @@ public class HiveSchemaTool {
 this.needsQuotedIdentifier = parser.needsQuotedIdentifier();
 this.quoteCharacter = parser.getQuoteCharacter();
     this.metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, hiveHome, dbType);
+// If the dbType is "hive", this is setting up the information schema in Hive.
+// We will set the default jdbc url and driver.
+// It is overridden by command line options if passed (-url and -driver).
+if (dbType.equalsIgnoreCase(HiveSchemaHelper.DB_HIVE)) {
+  url = HiveSchemaHelper.EMBEDDED_HS2_URL;
+  driver = HiveSchemaHelper.HIVE_JDBC_DRIVER;
+}
   }
 
   public HiveConf getHiveConf() {

http://git-wip-us.apache.org/repos/asf/hive/blob/8ac62574/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
index 785978b..70746e8 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
@@ -44,6 +44,9 @@ public class HiveSchemaHelper {
   public static final String DB_MYSQL = "mysql";
   public static final String DB_POSTGRACE = "postgres";
   public static final String DB_ORACLE = "oracle";
+  public static final String EMBEDDED_HS2_URL = "jdbc:hive2://";
+  public static final String HIVE_JDBC_DRIVER = "org.apache.hive.jdbc.HiveDriver";
+
 
   /***
* Get JDBC connection to metastore db

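With these defaults in place, initializing Hive's information schema no longer requires spelling out connection parameters: schematool falls back to the embedded HS2 URL and the Hive JDBC driver unless -url/-driver are passed on the command line. A hedged sketch (the remote URL is illustrative, and depending on the release you may also need -metaDbType to name the backing metastore database):

    # Uses the defaults added above: jdbc:hive2:// and org.apache.hive.jdbc.HiveDriver
    schematool -dbType hive -initSchema
    # Overriding explicitly, e.g. to point at a remote HiveServer2 instead:
    schematool -dbType hive -initSchema \
      -url 'jdbc:hive2://hs2.example.com:10000/default' \
      -driver org.apache.hive.jdbc.HiveDriver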


hive git commit: HIVE-19385: Optional hive env variable to redirect bin/hive to use Beeline (Vaibhav Gumashta reviewed by Daniel Dai)

2018-05-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 7de49b655 -> 0dfdece81


HIVE-19385: Optional hive env variable to redirect bin/hive to use Beeline 
(Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0dfdece8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0dfdece8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0dfdece8

Branch: refs/heads/branch-3
Commit: 0dfdece81cdf1e9b1904a9715e29df62c9ecdc99
Parents: 7de49b6
Author: Vaibhav Gumashta 
Authored: Wed May 2 14:53:49 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed May 2 14:54:37 2018 -0700

--
 bin/ext/cli.sh | 22 +++---
 1 file changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0dfdece8/bin/ext/cli.sh
--
diff --git a/bin/ext/cli.sh b/bin/ext/cli.sh
index c837508..14337c1 100644
--- a/bin/ext/cli.sh
+++ b/bin/ext/cli.sh
@@ -24,9 +24,25 @@ fi
 
 updateCli() {
   if [ "$USE_DEPRECATED_CLI" == "true" ]; then
-export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
-CLASS=org.apache.hadoop.hive.cli.CliDriver
-JAR=hive-cli-*.jar
+if [ "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]; then
+ CLASS=org.apache.hive.beeline.BeeLine;
+  # include only the beeline client jar and its dependencies
+  beelineJarPath=`ls ${HIVE_LIB}/hive-beeline-*.jar`
+  superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar`
+  jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar`
+  hadoopClasspath=""
+  if [[ -n "${HADOOP_CLASSPATH}" ]]
+  then
+hadoopClasspath="${HADOOP_CLASSPATH}:"
+  fi
+  export HADOOP_CLASSPATH="${hadoopClasspath}${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}"
+  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=beeline-log4j2.properties "
+  exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
+else
+  export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
+  CLASS=org.apache.hadoop.hive.cli.CliDriver
+  JAR=hive-cli-*.jar
+fi
   else
 export HADOOP_CLIENT_OPTS=" -Dproc_beeline $HADOOP_CLIENT_OPTS 
-Dlog4j.configurationFile=beeline-log4j2.properties"
 CLASS=org.apache.hive.beeline.cli.HiveCli



hive git commit: HIVE-19385: Optional hive env variable to redirect bin/hive to use Beeline (Vaibhav Gumashta reviewed by Daniel Dai)

2018-05-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master c06e106b8 -> f44cae409


HIVE-19385: Optional hive env variable to redirect bin/hive to use Beeline 
(Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f44cae40
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f44cae40
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f44cae40

Branch: refs/heads/master
Commit: f44cae409dbfe9fea29d396d89bc1baab910dafb
Parents: c06e106
Author: Vaibhav Gumashta 
Authored: Wed May 2 14:53:49 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed May 2 14:53:49 2018 -0700

--
 bin/ext/cli.sh | 22 +++---
 1 file changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f44cae40/bin/ext/cli.sh
--
diff --git a/bin/ext/cli.sh b/bin/ext/cli.sh
index c837508..14337c1 100644
--- a/bin/ext/cli.sh
+++ b/bin/ext/cli.sh
@@ -24,9 +24,25 @@ fi
 
 updateCli() {
   if [ "$USE_DEPRECATED_CLI" == "true" ]; then
-export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
-CLASS=org.apache.hadoop.hive.cli.CliDriver
-JAR=hive-cli-*.jar
+if [ "$USE_BEELINE_FOR_HIVE_CLI" == "true" ]; then
+ CLASS=org.apache.hive.beeline.BeeLine;
+  # include only the beeline client jar and its dependencies
+  beelineJarPath=`ls ${HIVE_LIB}/hive-beeline-*.jar`
+  superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar`
+  jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar`
+  hadoopClasspath=""
+  if [[ -n "${HADOOP_CLASSPATH}" ]]
+  then
+hadoopClasspath="${HADOOP_CLASSPATH}:"
+  fi
+  export HADOOP_CLASSPATH="${hadoopClasspath}${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}"
+  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=beeline-log4j2.properties "
+  exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
+else
+  export HADOOP_CLIENT_OPTS=" -Dproc_hivecli $HADOOP_CLIENT_OPTS "
+  CLASS=org.apache.hadoop.hive.cli.CliDriver
+  JAR=hive-cli-*.jar
+fi
   else
 export HADOOP_CLIENT_OPTS=" -Dproc_beeline $HADOOP_CLIENT_OPTS 
-Dlog4j.configurationFile=beeline-log4j2.properties"
 CLASS=org.apache.hive.beeline.cli.HiveCli



hive git commit: HIVE-19252: TestJdbcWithMiniKdcCookie.testCookieNegative is failing consistently (Vaibhav Gumashta reviewed by Daniel Dai)

2018-04-25 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 63b07222f -> 82706e596


HIVE-19252: TestJdbcWithMiniKdcCookie.testCookieNegative is failing 
consistently (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82706e59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82706e59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82706e59

Branch: refs/heads/branch-3
Commit: 82706e5964d534b597ab967e100a75eb4bb16600
Parents: 63b0722
Author: Vaibhav Gumashta 
Authored: Wed Apr 25 12:53:14 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Apr 25 12:54:03 2018 -0700

--
 .../java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/82706e59/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
--
diff --git 
a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
 
b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
index 9cad3ea..2fa2a87 100644
--- 
a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
+++ 
b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java
@@ -109,7 +109,7 @@ public class TestJdbcWithMiniKdcCookie {
   // login failure.
   getConnection(HIVE_NON_EXISTENT_USER);
 } catch (IOException e) {
-  Assert.assertTrue(e.getMessage().contains("Login failure"));
+  Assert.assertTrue(e.getMessage().contains("javax.security.auth.login.LoginException"));
 }
   }
 



hive git commit: HIVE-19249: Replication: WITH clause is not passing the configuration to Task correctly in all cases (Vaibhav Gumashta reviewed by Thejas Nair, Daniel Dai)

2018-04-20 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 caafbf403 -> ae700b0b1


HIVE-19249: Replication: WITH clause is not passing the configuration to Task 
correctly in all cases (Vaibhav Gumashta reviewed by Thejas Nair, Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ae700b0b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ae700b0b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ae700b0b

Branch: refs/heads/branch-3
Commit: ae700b0b18eac7aa37ff659179b5d5bd3bba30a7
Parents: caafbf4
Author: Vaibhav Gumashta 
Authored: Fri Apr 20 11:28:41 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 20 11:37:02 2018 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ae700b0b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 009a890..49c355b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2299,7 +2299,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
 assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
 boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
 boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
-HiveConf sessionConf = SessionState.getSessionConf();
 if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
   newFiles = Collections.synchronizedList(new ArrayList());
 }
@@ -2341,11 +2340,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
 boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 // TODO: this should never run for MM tables anymore. Remove the flag, and maybe the filter?
 replaceFiles(tblPath, loadPath, destPath, tblPath,
-sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
+conf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
   } else {
 try {
-  FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
-  copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
+  FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
+  copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
 loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles,
   tbl.getNumBuckets() > 0 ? true : false, isFullAcidTable);
 } catch (IOException e) {

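The fix replaces the cached session configuration (sessionConf) with the per-query conf, so options supplied in a replication command's WITH clause actually reach the file copy/move work. For example (database name, dump path, and property are illustrative, not from the patch):

    # The WITH clause settings below now reach the spawned tasks:
    beeline -u 'jdbc:hive2://hs2.example.com:10000/' -e \
      "REPL LOAD repl_db FROM '/user/hive/repl/dump1' WITH ('hive.exec.parallel'='true');"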


hive git commit: HIVE-19249: Replication: WITH clause is not passing the configuration to Task correctly in all cases (Vaibhav Gumashta reviewed by Thejas Nair, Daniel Dai)

2018-04-20 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 9cfaf6b00 -> 77afeb2d8


HIVE-19249: Replication: WITH clause is not passing the configuration to Task 
correctly in all cases (Vaibhav Gumashta reviewed by Thejas Nair, Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/77afeb2d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/77afeb2d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/77afeb2d

Branch: refs/heads/master
Commit: 77afeb2d80a8534a40061e7fd45cde42156d
Parents: 9cfaf6b
Author: Vaibhav Gumashta 
Authored: Fri Apr 20 11:28:41 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 20 11:36:33 2018 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/77afeb2d/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 009a890..49c355b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2299,7 +2299,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
 assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
 boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
 boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
-HiveConf sessionConf = SessionState.getSessionConf();
 if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
   newFiles = Collections.synchronizedList(new ArrayList());
 }
@@ -2341,11 +2340,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
 boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 // TODO: this should never run for MM tables anymore. Remove the flag, and maybe the filter?
 replaceFiles(tblPath, loadPath, destPath, tblPath,
-sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
+conf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
   } else {
 try {
-  FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
-  copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
+  FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
+  copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
 loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles,
   tbl.getNumBuckets() > 0 ? true : false, isFullAcidTable);
 } catch (IOException e) {



[1/2] hive git commit: HIVE-19126: CachedStore: Use memory estimation to limit cache size during prewarm (Vaibhav Gumashta reviewed by Thejas Nair)

2018-04-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 9db29e9d4 -> 624e464a2


http://git-wip-us.apache.org/repos/asf/hive/blob/624e464a/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
--
diff --git 
a/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
 
b/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
new file mode 100644
index 000..9421691
--- /dev/null
+++ 
b/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
@@ -0,0 +1,640 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.util;
+
+import java.lang.reflect.AccessibleObject;
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Creates size estimators for java objects. The estimators attempt to do most 
of the reflection
+ * work at initialization time, and also take some shortcuts, to minimize the 
amount of work done
+ * during the actual estimation.
+ * TODO: clean up
+ */
+public class IncrementalObjectSizeEstimator {
+  public static final JavaDataModel memoryModel = JavaDataModel.get();
+  static final private Logger LOG =
+  LoggerFactory.getLogger(IncrementalObjectSizeEstimator.class.getName());
+
+  private enum FieldType {
+PRIMITIVE_ARRAY, OBJECT_ARRAY, COLLECTION, MAP, OTHER
+  };
+
+  public static HashMap<Class<?>, ObjectEstimator> createEstimators(Object rootObj) {
+HashMap<Class<?>, ObjectEstimator> byType = new HashMap<>();
+addHardcodedEstimators(byType);
+createEstimators(rootObj, byType);
+return byType;
+  }
+
+  public static void createEstimators(Object rootObj, HashMap<Class<?>, ObjectEstimator> byType) {
+// Code initially inspired by Google ObjectExplorer.
+// TODO: roll in the direct-only estimators from fields. Various other 
optimizations possible.
+Deque<Object> stack = createWorkStack(rootObj, byType);
+
+while (!stack.isEmpty()) {
+  Object obj = stack.pop();
+  Class clazz;
+  if (obj instanceof Class) {
+clazz = (Class) obj;
+obj = null;
+  } else {
+clazz = obj.getClass();
+  }
+  ObjectEstimator estimator = byType.get(clazz);
+  assert estimator != null;
+  if (!estimator.isFromClass && obj == null) {
+// The object was added later for the same class; see addToProcessing.
+continue;
+  }
+  if (estimator.isProcessed())
+continue;
+  estimator.init();
+  for (Field field : getAllFields(clazz)) {
+Class fieldClass = field.getType();
+if (Modifier.isStatic(field.getModifiers())) {
+  continue;
+}
+if (Class.class.isAssignableFrom(fieldClass)) {
+  continue;
+}
+if (fieldClass.isPrimitive()) {
+  estimator.addPrimitive(fieldClass);
+  continue;
+}
+if (Enum.class.isAssignableFrom(fieldClass)) {
+  estimator.addEnum();
+  continue;
+}
+boolean isArray = fieldClass.isArray();
+if (isArray && fieldClass.getComponentType().isPrimitive()) {
+  estimator.addField(FieldType.PRIMITIVE_ARRAY, field);
+  continue;
+}
+Object fieldObj = null;
+if (obj != null) {
+  fieldObj = extractFieldObj(obj, field);
+  fieldClass = determineRealClass(byType, stack, field, fieldClass, 
fieldObj);
+}
+if (isArray) {
+  estimator.addField(FieldType.OBJECT_ARRAY, field);
+  addArrayEstimator(byType, stack, field, fieldObj);
+} else if (Collection.class.isAssignableFrom(fieldClass)) {
+

[2/2] hive git commit: HIVE-19126: CachedStore: Use memory estimation to limit cache size during prewarm (Vaibhav Gumashta reviewed by Thejas Nair)

2018-04-17 Thread vgumashta
HIVE-19126: CachedStore: Use memory estimation to limit cache size during 
prewarm (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/624e464a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/624e464a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/624e464a

Branch: refs/heads/branch-3
Commit: 624e464a2cc4fe4dd9395edf8b377fd7323a299e
Parents: 9db29e9
Author: Vaibhav Gumashta 
Authored: Tue Apr 17 12:53:40 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Apr 17 12:58:13 2018 -0700

--
 .../llap/IncrementalObjectSizeEstimator.java| 605 --
 .../llap/io/metadata/OrcFileEstimateErrors.java |   4 +-
 .../TestIncrementalObjectSizeEstimator.java |   4 +-
 .../hive/metastore/cache/CachedStore.java   |  35 +-
 .../hive/metastore/cache/SharedCache.java   | 280 +---
 .../hive/metastore/conf/MetastoreConf.java  |  10 +-
 .../hive/metastore/conf/SizeValidator.java  | 110 
 .../hive/metastore/cache/TestCachedStore.java   |   5 +
 .../ql/util/IncrementalObjectSizeEstimator.java | 640 +++
 9 files changed, 996 insertions(+), 697 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/624e464a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
deleted file mode 100644
index 6f4ec6f..000
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.llap;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.UnknownFieldSet;
-import java.lang.reflect.AccessibleObject;
-import java.lang.reflect.Array;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.IdentityHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import 
org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator.ObjectEstimator;
-import org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer;
-import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
-import org.apache.hadoop.hive.ql.util.JavaDataModel;
-
-/**
- * Creates size estimators for java objects. The estimators attempt to do most 
of the reflection
- * work at initialization time, and also take some shortcuts, to minimize the 
amount of work done
- * during the actual estimation. TODO: clean up
- */
-public class IncrementalObjectSizeEstimator {
-  public static final JavaDataModel memoryModel = JavaDataModel.get();
-  private enum FieldType { PRIMITIVE_ARRAY, OBJECT_ARRAY, COLLECTION, MAP, 
OTHER };
-
-  public static HashMap<Class<?>, ObjectEstimator> createEstimators(Object rootObj) {
-HashMap<Class<?>, ObjectEstimator> byType = new HashMap<>();
-addHardcodedEstimators(byType);
-createEstimators(rootObj, byType);
-return byType;
-  }
-
-  public static void createEstimators(Object rootObj, HashMap<Class<?>, ObjectEstimator> byType) {
-// Code initially inspired by Google ObjectExplorer.
-// TODO: roll in the direct-only estimators from fields. Various other 
optimizations possible.
-Deque<Object> stack = createWorkStack(rootObj, byType);
-
-while (!stack.isEmpty()) {
-  Object obj = stack.pop();
-  Class clazz;
-  if (obj instanceof Class) {
-clazz = (Class)obj;
-obj = null;
-  } else {
-clazz = obj.getClass();
-  

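Moving the estimator from llap-server into storage-api lets the metastore reuse LLAP's incremental size estimation, so prewarm can stop admitting objects once an estimated memory budget is reached instead of loading every table blindly. A hedged configuration sketch: the property name and default below are assumptions inferred from the SizeValidator/MetastoreConf entries in the diffstat, so verify them against your release before relying on them:

    # Assumed property name; caps the CachedStore prewarm memory budget:
    hive --service metastore \
      --hiveconf metastore.cached.rawstore.max.cache.memory=1Gb
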
[1/2] hive git commit: HIVE-19126: CachedStore: Use memory estimation to limit cache size during prewarm (Vaibhav Gumashta reviewed by Thejas Nair)

2018-04-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 3d1bf34b1 -> 4cfec3eb9


http://git-wip-us.apache.org/repos/asf/hive/blob/4cfec3eb/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
--
diff --git 
a/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
 
b/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
new file mode 100644
index 000..9421691
--- /dev/null
+++ 
b/storage-api/src/java/org/apache/hadoop/hive/ql/util/IncrementalObjectSizeEstimator.java
@@ -0,0 +1,640 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.util;
+
+import java.lang.reflect.AccessibleObject;
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Creates size estimators for java objects. The estimators attempt to do most 
of the reflection
+ * work at initialization time, and also take some shortcuts, to minimize the 
amount of work done
+ * during the actual estimation.
+ * TODO: clean up
+ */
+public class IncrementalObjectSizeEstimator {
+  public static final JavaDataModel memoryModel = JavaDataModel.get();
+  static final private Logger LOG =
+  LoggerFactory.getLogger(IncrementalObjectSizeEstimator.class.getName());
+
+  private enum FieldType {
+PRIMITIVE_ARRAY, OBJECT_ARRAY, COLLECTION, MAP, OTHER
+  };
+
+  public static HashMap<Class<?>, ObjectEstimator> createEstimators(Object rootObj) {
+HashMap<Class<?>, ObjectEstimator> byType = new HashMap<>();
+addHardcodedEstimators(byType);
+createEstimators(rootObj, byType);
+return byType;
+  }
+
+  public static void createEstimators(Object rootObj, HashMap<Class<?>, ObjectEstimator> byType) {
+// Code initially inspired by Google ObjectExplorer.
+// TODO: roll in the direct-only estimators from fields. Various other 
optimizations possible.
+Deque<Object> stack = createWorkStack(rootObj, byType);
+
+while (!stack.isEmpty()) {
+  Object obj = stack.pop();
+  Class clazz;
+  if (obj instanceof Class) {
+clazz = (Class) obj;
+obj = null;
+  } else {
+clazz = obj.getClass();
+  }
+  ObjectEstimator estimator = byType.get(clazz);
+  assert estimator != null;
+  if (!estimator.isFromClass && obj == null) {
+// The object was added later for the same class; see addToProcessing.
+continue;
+  }
+  if (estimator.isProcessed())
+continue;
+  estimator.init();
+  for (Field field : getAllFields(clazz)) {
+Class fieldClass = field.getType();
+if (Modifier.isStatic(field.getModifiers())) {
+  continue;
+}
+if (Class.class.isAssignableFrom(fieldClass)) {
+  continue;
+}
+if (fieldClass.isPrimitive()) {
+  estimator.addPrimitive(fieldClass);
+  continue;
+}
+if (Enum.class.isAssignableFrom(fieldClass)) {
+  estimator.addEnum();
+  continue;
+}
+boolean isArray = fieldClass.isArray();
+if (isArray && fieldClass.getComponentType().isPrimitive()) {
+  estimator.addField(FieldType.PRIMITIVE_ARRAY, field);
+  continue;
+}
+Object fieldObj = null;
+if (obj != null) {
+  fieldObj = extractFieldObj(obj, field);
+  fieldClass = determineRealClass(byType, stack, field, fieldClass, 
fieldObj);
+}
+if (isArray) {
+  estimator.addField(FieldType.OBJECT_ARRAY, field);
+  addArrayEstimator(byType, stack, field, fieldObj);
+} else if (Collection.class.isAssignableFrom(fieldClass)) {
+  

hive git commit: HIVE-18840: CachedStore: Prioritize loading of recently accessed tables during prewarm (Vaibhav Gumashta reviewed by Daniel Dai)

2018-04-11 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-3 f57c33607 -> a3d9c46df


HIVE-18840: CachedStore: Prioritize loading of recently accessed tables during 
prewarm (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a3d9c46d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a3d9c46d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a3d9c46d

Branch: refs/heads/branch-3
Commit: a3d9c46df453f787c3eaeeb2c33373997d17dec4
Parents: f57c336
Author: Vaibhav Gumashta 
Authored: Wed Apr 11 15:39:30 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Apr 11 15:46:26 2018 -0700

--
 .../hive/metastore/cache/CachedStore.java   | 198 +++
 1 file changed, 114 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a3d9c46d/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index c47856d..1ce86bb 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -18,23 +18,21 @@
 package org.apache.hadoop.hive.metastore.cache;
 
 
-import java.io.Closeable;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
+import java.util.EmptyStackException;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Stack;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -100,7 +98,6 @@ import 
org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -146,6 +143,7 @@ public class CachedStore implements RawStore, Configurable {
   // Time after which metastore cache is updated from metastore DB by the 
background update thread
   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+  private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
   private RawStore rawStore = null;
   private Configuration conf;
   private PartitionExpressionProxy expressionProxy = null;
@@ -153,10 +151,6 @@ public class CachedStore implements RawStore, Configurable 
{
 
   static final private Logger LOG = 
LoggerFactory.getLogger(CachedStore.class.getName());
 
-  public CachedStore() {
-
-  }
-
   @Override
   public void setConf(Configuration conf) {
 setConfInternal(conf);
@@ -211,12 +205,13 @@ public class CachedStore implements RawStore, 
Configurable {
   Collection catalogsToCache;
   try {
 catalogsToCache = catalogsToCache(rawStore);
-LOG.info("Going to cache catalogs: " +
-org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+LOG.info("Going to cache catalogs: "
++ org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
 List catalogs = new ArrayList<>(catalogsToCache.size());
-for (String catName : catalogsToCache) catalogs.add(rawStore.getCatalog(catName));
+for (String catName : catalogsToCache)
+  catalogs.add(rawStore.getCatalog(catName));
 sharedCache.populateCatalogsInCache(catalogs);
-  } catch (MetaException|NoSuchObjectException e) {
+  } catch (MetaException | NoSuchObjectException e) {
 LOG.warn("Failed to populate catalogs in cache, going to try again", 
e);
 // try again
 continue;
@@ -232,8 +227,8 @@ public class CachedStore implements RawStore, Configurable {
   databases.add(rawStore.getDatabase(catName, dbName));
 } catch 

hive git commit: HIVE-18840: CachedStore: Prioritize loading of recently accessed tables during prewarm (Vaibhav Gumashta reviewed by Daniel Dai)

2018-04-11 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 42187fdbc -> b3fe6522e


HIVE-18840: CachedStore: Prioritize loading of recently accessed tables during 
prewarm (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b3fe6522
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b3fe6522
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b3fe6522

Branch: refs/heads/master
Commit: b3fe6522e651fa4f00f1a1a75e6f12c132eacf21
Parents: 42187fd
Author: Vaibhav Gumashta 
Authored: Wed Apr 11 15:39:30 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Apr 11 15:39:30 2018 -0700

--
 .../hive/metastore/cache/CachedStore.java   | 198 +++
 1 file changed, 114 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b3fe6522/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index c47856d..1ce86bb 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -18,23 +18,21 @@
 package org.apache.hadoop.hive.metastore.cache;
 
 
-import java.io.Closeable;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
+import java.util.EmptyStackException;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Stack;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -100,7 +98,6 @@ import 
org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -146,6 +143,7 @@ public class CachedStore implements RawStore, Configurable {
   // Time after which metastore cache is updated from metastore DB by the 
background update thread
   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+  private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
   private RawStore rawStore = null;
   private Configuration conf;
   private PartitionExpressionProxy expressionProxy = null;
@@ -153,10 +151,6 @@ public class CachedStore implements RawStore, Configurable 
{
 
   static final private Logger LOG = 
LoggerFactory.getLogger(CachedStore.class.getName());
 
-  public CachedStore() {
-
-  }
-
   @Override
   public void setConf(Configuration conf) {
 setConfInternal(conf);
@@ -211,12 +205,13 @@ public class CachedStore implements RawStore, 
Configurable {
   Collection catalogsToCache;
   try {
 catalogsToCache = catalogsToCache(rawStore);
-LOG.info("Going to cache catalogs: " +
-org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+LOG.info("Going to cache catalogs: "
++ org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
 List catalogs = new ArrayList<>(catalogsToCache.size());
-for (String catName : catalogsToCache) catalogs.add(rawStore.getCatalog(catName));
+for (String catName : catalogsToCache)
+  catalogs.add(rawStore.getCatalog(catName));
 sharedCache.populateCatalogsInCache(catalogs);
-  } catch (MetaException|NoSuchObjectException e) {
+  } catch (MetaException | NoSuchObjectException e) {
 LOG.warn("Failed to populate catalogs in cache, going to try again", 
e);
 // try again
 continue;
@@ -232,8 +227,8 @@ public class CachedStore implements RawStore, Configurable {
   databases.add(rawStore.getDatabase(catName, dbName));
 } catch 

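The TablesPendingPrewarm queue means a table requested while prewarm is still running is bumped to the front of the loading order, so frequently accessed tables become cache-backed first on a large warehouse. Roughly, with illustrative names:

    # While the background prewarm is still populating the CachedStore,
    # touching a table prioritizes it for loading:
    beeline -u 'jdbc:hive2://hs2.example.com:10000/' -e 'DESCRIBE sales.orders;'
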
hive git commit: HIVE-18963: JDBC: Provide an option to simplify beeline usage by supporting default and named URL for beeline (Vaibhav Gumashta reviewed by Vihang Karajgaonkar, Thejas Nair)

2018-04-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 733aecf28 -> 54dbd7fe2


HIVE-18963: JDBC: Provide an option to simplify beeline usage by supporting 
default and named URL for beeline (Vaibhav Gumashta reviewed by Vihang 
Karajgaonkar, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/54dbd7fe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/54dbd7fe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/54dbd7fe

Branch: refs/heads/master
Commit: 54dbd7fe2fe37fc9f9b0721fde4dd327e4011bc9
Parents: 733aecf
Author: Vaibhav Gumashta 
Authored: Mon Apr 2 11:28:30 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Apr 2 11:28:30 2018 -0700

--
 .../java/org/apache/hive/beeline/BeeLine.java   | 108 +++---
 .../BeelineConfFileParseException.java  |  30 +++
 .../BeelineHS2ConnectionFileParseException.java |   2 +-
 .../BeelineSiteParseException.java  |  30 +++
 .../hs2connection/BeelineSiteParser.java| 145 +
 .../hs2connection/HS2ConnectionFileParser.java  |   2 +-
 .../hs2connection/HS2ConnectionFileUtils.java   | 131 
 .../UserHS2ConnectionFileParser.java|   3 -
 beeline/src/main/resources/BeeLine.properties   |   3 +
 .../TestUserHS2ConnectionFileParser.java|   4 +-
 .../BeelineWithHS2ConnectionFileTestBase.java   |   2 +-
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   | 212 ++-
 12 files changed, 536 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/54dbd7fe/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 4928761..6f7f1fc 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -35,7 +35,6 @@ import java.io.PrintStream;
 import java.io.SequenceInputStream;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
 import java.net.JarURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
@@ -75,9 +74,6 @@ import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.jar.Attributes;
 import java.util.jar.Manifest;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
-
 import jline.console.completer.Completer;
 import jline.console.completer.StringsCompleter;
 import jline.console.completer.FileNameCompleter;
@@ -95,7 +91,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hive.beeline.cli.CliOptionsProcessor;
 import org.apache.hive.common.util.ShutdownHookManager;
-import 
org.apache.hive.beeline.hs2connection.BeelineHS2ConnectionFileParseException;
+import org.apache.hive.beeline.hs2connection.BeelineConfFileParseException;
+import org.apache.hive.beeline.hs2connection.BeelineSiteParseException;
+import org.apache.hive.beeline.hs2connection.BeelineSiteParser;
 import org.apache.hive.beeline.hs2connection.HS2ConnectionFileUtils;
 import org.apache.hive.beeline.hs2connection.UserHS2ConnectionFileParser;
 import org.apache.hive.beeline.hs2connection.HS2ConnectionFileParser;
@@ -104,6 +102,7 @@ import org.apache.thrift.transport.TTransportException;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.hive.jdbc.JdbcUriParseException;
 import org.apache.hive.jdbc.Utils;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
 
@@ -309,16 +308,24 @@ public class BeeLine implements Closeable {
 options.addOption(OptionBuilder
 .hasArg()
 .withArgName("driver class")
-.withDescription("the driver class to use")
+.withDescription("The driver class to use")
 .create('d'));
 
 // -u 
 options.addOption(OptionBuilder
 .hasArg()
 .withArgName("database url")
-.withDescription("the JDBC URL to connect to")
+.withDescription("The JDBC URL to connect to")
 .create('u'));
 
+// -c <named url>
+options.addOption(OptionBuilder
+.hasArg()
+.withArgName("named JDBC URL in beeline-site.xml")
+.withDescription("The named JDBC URL to connect to, which should be present in "
++ "beeline-site.xml as the value of beeline.hs2.jdbc.url.<namedUrl>")
+.create('c'));
+
 // -r
 options.addOption(OptionBuilder
 .withLongOpt("reconnect")
@@ -329,14 +336,14 @@ public class BeeLine implements Closeable {
 options.addOption(OptionBuilder
 .hasArg()
 .withArgName("username")
-.withDescription("the username to connect 

[3/4] hive git commit: HIVE-18264: CachedStore: Store cached partitions/col stats within the table cache and make prewarm non-blocking (Vaibhav Gumashta reviewed by Daniel Dai, Alexander Kolbasov)

2018-03-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/26c0ab6a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index d28b196..d37b201 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hive.metastore.cache;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.api.ISchemaName;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -35,7 +35,6 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -95,8 +94,6 @@ import 
org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -124,130 +121,50 @@ import com.google.common.annotations.VisibleForTesting;
 // TODO constraintCache
 // TODO need sd nested copy?
 // TODO String intern
-// TODO restructure HBaseStore
 // TODO monitor event queue
 // TODO initial load slow?
 // TODO size estimation
-// TODO factor in extrapolation logic (using partitions found) during 
aggregate stats calculation
 
 public class CachedStore implements RawStore, Configurable {
   private static ScheduledExecutorService cacheUpdateMaster = null;
-  private static ReentrantReadWriteLock databaseCacheLock = new 
ReentrantReadWriteLock(true);
-  private static AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false);
-  private static ReentrantReadWriteLock tableCacheLock = new 
ReentrantReadWriteLock(true);
-  private static AtomicBoolean isTableCacheDirty = new AtomicBoolean(false);
-  private static ReentrantReadWriteLock partitionCacheLock = new 
ReentrantReadWriteLock(true);
-  private static AtomicBoolean isPartitionCacheDirty = new 
AtomicBoolean(false);
-  private static ReentrantReadWriteLock tableColStatsCacheLock = new 
ReentrantReadWriteLock(true);
-  private static AtomicBoolean isTableColStatsCacheDirty = new 
AtomicBoolean(false);
-  private static ReentrantReadWriteLock partitionColStatsCacheLock = new 
ReentrantReadWriteLock(
-  true);
-  private static ReentrantReadWriteLock partitionAggrColStatsCacheLock =
-  new ReentrantReadWriteLock(true);
-  private static AtomicBoolean isPartitionAggrColStatsCacheDirty = new 
AtomicBoolean(false);
-  private static AtomicBoolean isPartitionColStatsCacheDirty = new 
AtomicBoolean(false);
   private static List whitelistPatterns = null;
   private static List blacklistPatterns = null;
+  // Default value set to 100 milliseconds for test purpose
+  private static long DEFAULT_CACHE_REFRESH_PERIOD = 100;
+  // Time after which metastore cache is updated from metastore DB by the 
background update thread
+  private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
+  private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
   private RawStore rawStore = null;
   private Configuration conf;
   private PartitionExpressionProxy expressionProxy = null;
-  // Default value set to 100 milliseconds for test purpose
-  private static long cacheRefreshPeriod = 100;
-
-  /** A wrapper over SharedCache. Allows one to get SharedCache safely; should 
be merged
-   *  into SharedCache itself (see the TODO on the class). */
-  private static final SharedCacheWrapper sharedCacheWrapper = new 
SharedCacheWrapper();
+  private static final SharedCache sharedCache = new SharedCache();
 
   static final private Logger LOG = 
LoggerFactory.getLogger(CachedStore.class.getName());
 
-  static class TableWrapper {
-Table t;
-String location;
-Map parameters;
-byte[] sdHash;
-

[1/4] hive git commit: HIVE-18264: CachedStore: Store cached partitions/col stats within the table cache and make prewarm non-blocking (Vaibhav Gumashta reviewed by Daniel Dai, Alexander Kolbasov)

2018-03-19 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 79e88695c -> 26c0ab6ad


http://git-wip-us.apache.org/repos/asf/hive/blob/26c0ab6a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
--
diff --git 
a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
 
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 0006815..a72fc0b 100644
--- 
a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ 
b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -22,7 +22,10 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
@@ -66,19 +69,13 @@ public class TestCachedStore {
 objectStore = new ObjectStore();
 objectStore.setConf(conf);
 cachedStore = new CachedStore();
-cachedStore.setConf(conf);
-// Stop the CachedStore cache update service. We'll start it explicitly to 
control the test
-CachedStore.stopCacheUpdateService(1);
-cachedStore.setInitializedForTest();
-
+cachedStore.setConfForTest(conf);
 // Stop the CachedStore cache update service. We'll start it explicitly to 
control the test
 CachedStore.stopCacheUpdateService(1);
 sharedCache = new SharedCache();
 sharedCache.getDatabaseCache().clear();
 sharedCache.getTableCache().clear();
-sharedCache.getPartitionCache().clear();
 sharedCache.getSdCache().clear();
-sharedCache.getPartitionColStatsCache().clear();
   }
 
   
/**
@@ -89,61 +86,49 @@ public class TestCachedStore {
   public void testDatabaseOps() throws Exception {
 // Add a db via ObjectStore
 String dbName = "testDatabaseOps";
-String dbDescription = "testDatabaseOps";
-String dbLocation = "file:/tmp";
-Map dbParams = new HashMap<>();
 String dbOwner = "user1";
-Database db = new Database(dbName, dbDescription, dbLocation, dbParams);
-db.setOwnerName(dbOwner);
-db.setOwnerType(PrincipalType.USER);
+Database db = createTestDb(dbName, dbOwner);
 objectStore.createDatabase(db);
 db = objectStore.getDatabase(dbName);
 // Prewarm CachedStore
+CachedStore.setCachePrewarmedState(false);
 CachedStore.prewarm(objectStore);
 
 // Read database via CachedStore
-Database dbNew = cachedStore.getDatabase(dbName);
-Assert.assertEquals(db, dbNew);
+Database dbRead = cachedStore.getDatabase(dbName);
+Assert.assertEquals(db, dbRead);
 
 // Add another db via CachedStore
 final String dbName1 = "testDatabaseOps1";
-final String dbDescription1 = "testDatabaseOps1";
-Database db1 = new Database(dbName1, dbDescription1, dbLocation, dbParams);
-db1.setOwnerName(dbOwner);
-db1.setOwnerType(PrincipalType.USER);
+Database db1 = createTestDb(dbName1, dbOwner);
 cachedStore.createDatabase(db1);
 db1 = cachedStore.getDatabase(dbName1);
 
 // Read db via ObjectStore
-dbNew = objectStore.getDatabase(dbName1);
-Assert.assertEquals(db1, dbNew);
+dbRead = objectStore.getDatabase(dbName1);
+Assert.assertEquals(db1, dbRead);
 
 // Alter the db via CachedStore (can only alter owner or parameters)
-db = new Database(dbName, dbDescription, dbLocation, dbParams);
 dbOwner = "user2";
+db = new Database(db);
 db.setOwnerName(dbOwner);
-db.setOwnerType(PrincipalType.USER);
 cachedStore.alterDatabase(dbName, db);
 db = cachedStore.getDatabase(dbName);
 
 // Read db via ObjectStore
-dbNew = objectStore.getDatabase(dbName);
-Assert.assertEquals(db, dbNew);
+dbRead = objectStore.getDatabase(dbName);
+Assert.assertEquals(db, dbRead);
 
 // Add another db via ObjectStore
 final String dbName2 = "testDatabaseOps2";
-final String dbDescription2 = "testDatabaseOps2";
-Database db2 = new Database(dbName2, dbDescription2, dbLocation, dbParams);
-db2.setOwnerName(dbOwner);
-db2.setOwnerType(PrincipalType.USER);
+Database db2 = createTestDb(dbName2, dbOwner);
 objectStore.createDatabase(db2);
 db2 = objectStore.getDatabase(dbName2);
 
 // Alter db "testDatabaseOps" via ObjectStore
 dbOwner = "user1";
-db = new Database(dbName, dbDescription, dbLocation, dbParams);
+db = new Database(db);
 db.setOwnerName(dbOwner);
-db.setOwnerType(PrincipalType.USER);
 

[2/4] hive git commit: HIVE-18264: CachedStore: Store cached partitions/col stats within the table cache and make prewarm non-blocking (Vaibhav Gumashta reviewed by Daniel Dai, Alexander Kolbasov)

2018-03-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/26c0ab6a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 32ea174..cf92eda 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -21,15 +21,20 @@ import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.TreeMap;
-
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.hadoop.hive.metastore.StatObjectConverter;
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -38,11 +43,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper;
-import 
org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper;
-import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
 import org.apache.hadoop.hive.metastore.utils.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,15 +51,21 @@ import org.slf4j.LoggerFactory;
 import com.google.common.annotations.VisibleForTesting;
 
 public class SharedCache {
-  private Map databaseCache = new TreeMap<>();
-  private Map tableCache = new TreeMap<>();
-  private Map partitionCache = new TreeMap<>();
-  private Map partitionColStatsCache = new 
TreeMap<>();
-  private Map tableColStatsCache = new 
TreeMap<>();
-  private Map sdCache = new 
HashMap<>();
-  private Map aggrColStatsCache =
-  new HashMap();
+  private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true);
+  // For caching Database objects. Key is database name
+  private Map<String, Database> databaseCache = new ConcurrentHashMap<String, Database>();
+  private boolean isDatabaseCachePrewarmed = false;
+  private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<String>();
+  private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false);
+  // For caching TableWrapper objects. Key is aggregate of database name and table name
+  private Map<String, TableWrapper> tableCache = new ConcurrentHashMap<String, TableWrapper>();
+  private boolean isTableCachePrewarmed = false;
+  private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<String>();
+  private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false);
+  private Map sdCache = new ConcurrentHashMap<>();
+  private static MessageDigest md;
+  static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName());
+  private AtomicLong cacheUpdateCount = new AtomicLong(0);
 
   static enum StatsType {
 ALL(0), ALLBUTDEFAULT(1);
@@ -74,8 +81,6 @@ public class SharedCache {
 }
   }
 
-  private static final Logger LOG = LoggerFactory.getLogger(SharedCache.class);
-
   static {
 try {
   md = MessageDigest.getInstance("MD5");
@@ -84,43 +89,804 @@ public class SharedCache {
 }
   }
 
-  public synchronized Database getDatabaseFromCache(String name) {
-    return databaseCache.get(name)!=null?databaseCache.get(name).deepCopy():null;
+  static class TableWrapper {
+    Table t;
+    String location;
+    Map<String, String> parameters;
+    byte[] sdHash;
+    ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true);
+    // For caching column stats for an unpartitioned table
+    // Key is column name and the value is the col stat object
+    private 

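The fields above pair each ConcurrentHashMap with a fair ReentrantReadWriteLock and an AtomicBoolean dirty flag, so a background prewarm can detect writes that raced with it. A minimal sketch of that pattern follows; the names (PrewarmableCache, refreshFromSnapshot) are illustrative and not part of the SharedCache API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical, simplified cache illustrating the dirty-flag pattern above.
public class PrewarmableCache<K, V> {
  private final Map<K, V> cache = new ConcurrentHashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
  // Set by writers; checked by the background refresher before it overwrites entries.
  private final AtomicBoolean dirty = new AtomicBoolean(false);

  public V read(K key) {
    lock.readLock().lock();
    try {
      return cache.get(key);
    } finally {
      lock.readLock().unlock();
    }
  }

  public void write(K key, V value) {
    lock.writeLock().lock();
    try {
      cache.put(key, value);
      dirty.set(true); // tell the refresher its snapshot is stale
    } finally {
      lock.writeLock().unlock();
    }
  }

  // A periodic refresher applies a freshly-read snapshot only if no writer raced with it.
  public void refreshFromSnapshot(Map<K, V> snapshot) {
    if (dirty.compareAndSet(true, false)) {
      return; // a write happened since the snapshot was taken; skip this round
    }
    lock.writeLock().lock();
    try {
      cache.putAll(snapshot);
    } finally {
      lock.writeLock().unlock();
    }
  }
}
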
[4/4] hive git commit: HIVE-18264: CachedStore: Store cached partitions/col stats within the table cache and make prewarm non-blocking (Vaibhav Gumashta reviewed by Daniel Dai, Alexander Kolbasov)

2018-03-19 Thread vgumashta
HIVE-18264: CachedStore: Store cached partitions/col stats within the table 
cache and make prewarm non-blocking (Vaibhav Gumashta reviewed by Daniel Dai, 
Alexander Kolbasov)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/26c0ab6a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/26c0ab6a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/26c0ab6a

Branch: refs/heads/master
Commit: 26c0ab6adb48755ef2f5cff2ec9c4b0e9a431821
Parents: 79e8869
Author: Vaibhav Gumashta 
Authored: Mon Mar 19 10:47:37 2018 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Mar 19 10:47:37 2018 -0700

--
 .../listener/DummyRawStoreFailEvent.java|9 +-
 .../apache/hive/service/server/HiveServer2.java |6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|4 -
 .../hadoop/hive/metastore/ObjectStore.java  |   30 -
 .../apache/hadoop/hive/metastore/RawStore.java  |   11 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |   85 +-
 .../hive/metastore/cache/CachedStore.java   | 1552 +
 .../hive/metastore/cache/SharedCache.java   | 1588 +-
 .../hive/metastore/utils/MetaStoreUtils.java|   11 +-
 .../DummyRawStoreControlledCommit.java  |7 -
 .../DummyRawStoreForJdoConnection.java  |7 -
 .../hive/metastore/cache/TestCachedStore.java   |  546 +++---
 .../src/test/resources/log4j2.properties|   74 +-
 13 files changed, 2043 insertions(+), 1887 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/26c0ab6a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
--
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 6144b61..e2244a1 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -976,7 +976,7 @@ public class DummyRawStoreFailEvent implements RawStore, 
Configurable {
   public List getAllResourcePlans() throws MetaException {
 return objectStore.getAllResourcePlans();
   }
- 
+
   @Override
   public WMFullResourcePlan alterResourcePlan(String name, 
WMNullableResourcePlan resourcePlan,
   boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
@@ -1069,13 +1069,6 @@ public class DummyRawStoreFailEvent implements RawStore, 
Configurable {
 objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, 
poolPath);
   }
 
-  @Override
-  public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String dbName)
-      throws MetaException, NoSuchObjectException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
   public void createISchema(ISchema schema) throws AlreadyExistsException, 
MetaException,
   NoSuchObjectException {
 objectStore.createISchema(schema);

http://git-wip-us.apache.org/repos/asf/hive/blob/26c0ab6a/service/src/java/org/apache/hive/service/server/HiveServer2.java
--
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java 
b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 5b792ac..bb92c44 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -64,7 +64,6 @@ import 
org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMPool;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.cache.CachedStore;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
 import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
 import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
@@ -163,9 +162,6 @@ public class HiveServer2 extends CompositeService {
   LOG.warn("Could not initiate the HiveServer2 Metrics system.  Metrics 
may not be reported.", t);
 }
 
-// Initialize cachedstore with background prewarm. The prewarm will only 
start if configured.
-CachedStore.initSharedCacheAsync(hiveConf);
-
 cliService = new CLIService(this);
 addService(cliService);
 final HiveServer2 hiveServer2 = this;
@@ -570,7 +566,7 @@ public class HiveServer2 extends CompositeService {
 
   private void removeServerInstanceFromZooKeeper() throws Exception {
 
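Removing CachedStore.initSharedCacheAsync from HiveServer2 goes hand in hand with making the prewarm non-blocking: until the background thread finishes, reads that miss the cache fall through to the raw store. A rough sketch of that behavior under hypothetical names (FallThroughCache and backingStore are illustrative, not the actual CachedStore code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.function.Function;

// Hypothetical cache whose reads fall through to the backing store until prewarm completes.
public class FallThroughCache<K, V> {
  private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();
  private volatile boolean prewarmed = false;
  private final Function<K, V> backingStore; // e.g. a raw metastore lookup

  public FallThroughCache(Function<K, V> backingStore) {
    this.backingStore = backingStore;
  }

  public void prewarmAsync(Iterable<K> allKeys) {
    Executors.newSingleThreadExecutor(r -> {
      Thread t = new Thread(r, "cache-prewarm");
      t.setDaemon(true);
      return t;
    }).submit(() -> {
      for (K key : allKeys) {
        cache.putIfAbsent(key, backingStore.apply(key));
      }
      prewarmed = true;
    });
  }

  public V get(K key) {
    V v = cache.get(key);
    if (v == null && !prewarmed) {
      v = backingStore.apply(key); // not cached yet: serve from the backing store
    }
    return v;
  }
}
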

hive git commit: HIVE-18447: JDBC: Provide a way for JDBC users to pass cookie info via connection string (Vaibhav Gumashta reviewed by Thejas Nair)

2018-02-02 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 39f1e82ad -> fdd8fabdc


HIVE-18447: JDBC: Provide a way for JDBC users to pass cookie info via 
connection string (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fdd8fabd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fdd8fabd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fdd8fabd

Branch: refs/heads/master
Commit: fdd8fabdcc5f8eb6ce749f55ec4637a0b96b4423
Parents: 39f1e82
Author: Vaibhav Gumashta 
Authored: Fri Feb 2 10:22:18 2018 -0800
Committer: Vaibhav Gumashta 
Committed: Fri Feb 2 10:22:18 2018 -0800

--
 .../TestThriftHttpCLIServiceFeatures.java   | 70 +++-
 .../org/apache/hive/jdbc/HiveConnection.java| 23 ---
 .../hive/jdbc/HttpBasicAuthInterceptor.java | 13 ++--
 .../jdbc/HttpKerberosRequestInterceptor.java|  8 +--
 .../hive/jdbc/HttpRequestInterceptorBase.java   | 20 +-
 .../hive/jdbc/HttpTokenAuthInterceptor.java |  6 +-
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |  8 ++-
 7 files changed, 105 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fdd8fabd/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
index 93b10fb..9012867 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
@@ -90,18 +90,19 @@ public class TestThriftHttpCLIServiceFeatures  {
*/
   public class HttpBasicAuthInterceptorWithLogging extends 
HttpBasicAuthInterceptor {
 
-   ArrayList<String> requestHeaders;
+    ArrayList<String> requestHeaders;
+    String cookieHeader;
 
-   public HttpBasicAuthInterceptorWithLogging(String username,
-      String password, CookieStore cookieStore, String cn, boolean isSSL,
-      Map<String, String> additionalHeaders) {
-      super(username, password, cookieStore, cn, isSSL, additionalHeaders);
+    public HttpBasicAuthInterceptorWithLogging(String username, String password,
+        CookieStore cookieStore, String cn, boolean isSSL, Map<String, String> additionalHeaders,
+        Map<String, String> customCookies) {
+      super(username, password, cookieStore, cn, isSSL, additionalHeaders, customCookies);
      requestHeaders = new ArrayList<String>();
    }
 
 @Override
 public void process(HttpRequest httpRequest, HttpContext httpContext)
-  throws HttpException, IOException {
+throws HttpException, IOException {
   super.process(httpRequest, httpContext);
 
   String currHeaders = "";
@@ -110,11 +111,21 @@ public class TestThriftHttpCLIServiceFeatures  {
 currHeaders += h.getName() + ":" + h.getValue() + " ";
   }
   requestHeaders.add(currHeaders);
+
+  Header[] headers = httpRequest.getHeaders("Cookie");
+  cookieHeader = "";
+  for (Header h : headers) {
+cookieHeader = cookieHeader + h.getName() + ":" + h.getValue();
+  }
 }
 
-    public ArrayList<String>  getRequestHeaders() {
+    public ArrayList<String> getRequestHeaders() {
   return requestHeaders;
 }
+
+public String getCookieHeader() {
+  return cookieHeader;
+}
   }
 
 
@@ -130,7 +141,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 assertNotNull(ThriftCLIServiceTest.hiveServer2);
 assertNotNull(ThriftCLIServiceTest.hiveConf);
 HiveConf hiveConf = ThriftCLIServiceTest.hiveConf;
-
+
 hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
 hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, 
ThriftCLIServiceTest.host);
 hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, 
ThriftCLIServiceTest.port);
@@ -219,7 +230,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 String httpUrl = getHttpUrl();
 httpClient.addRequestInterceptor(
 new HttpBasicAuthInterceptor(ThriftCLIServiceTest.USERNAME, 
ThriftCLIServiceTest.PASSWORD,
-null, null, false, null));
+null, null, false, null, null));
 return new THttpClient(httpUrl, httpClient);
   }
 
@@ -243,7 +254,7 @@ public class TestThriftHttpCLIServiceFeatures  {
 additionalHeaders.put("key2", "value2");
 HttpBasicAuthInterceptorWithLogging authInt =
   new HttpBasicAuthInterceptorWithLogging(ThriftCLIServiceTest.USERNAME, 

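As a usage note: with this change, HTTP-mode JDBC clients can attach custom cookies to every request directly from the connection URL. A hedged example follows; the host, port, and cookie names are made up, and the http.cookie.<name>=<value> key format is this patch's convention as I read it, so verify against the current JDBC docs:

import java.sql.Connection;
import java.sql.DriverManager;

public class CookieJdbcExample {
  public static void main(String[] args) throws Exception {
    // Illustrative URL: gateway-host and the cookie names are hypothetical.
    String url = "jdbc:hive2://gateway-host:10001/default;"
        + "transportMode=http;httpPath=cliservice;"
        + "http.cookie.loadbalancer=sticky1;http.cookie.tenant=teamA";
    try (Connection conn = DriverManager.getConnection(url, "user", "password")) {
      System.out.println("Connected; the custom cookies ride along on each HTTP request");
    }
  }
}
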
hive git commit: HIVE-18528: Stats: In the bitvector codepath, when extrapolating column stats for String type column, StringColumnStatsAggregator uses the min value instead of max (Vaibhav Gumashta rev

2018-02-01 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 419593e70 -> 32b899448


HIVE-18528: Stats: In the bitvector codepath, when extrapolating column stats
for String type column, StringColumnStatsAggregator uses the min value instead
of max (Vaibhav Gumashta reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/32b89944
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/32b89944
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/32b89944

Branch: refs/heads/master
Commit: 32b8994480ec94cb1f28ba9cd295cd85cc7fe064
Parents: 419593e
Author: Vaibhav Gumashta 
Authored: Thu Feb 1 11:47:57 2018 -0800
Committer: Vaibhav Gumashta 
Committed: Thu Feb 1 11:47:57 2018 -0800

--
 .../metastore/columnstats/aggr/StringColumnStatsAggregator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/32b89944/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
index 2b8c493..9537647 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
@@ -178,7 +178,7 @@ public class StringColumnStatsAggregator extends ColumnStatsAggregator implement
           if (aggregateData == null) {
             aggregateData = newData.deepCopy();
           } else {
-            aggregateData.setAvgColLen(Math.min(aggregateData.getAvgColLen(),
+            aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(),
                 newData.getAvgColLen()));
             aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(),
                 newData.getMaxColLen()));

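The one-character fix matters because extrapolation should err on the conservative side: taking the min let a single partition of short strings shrink the table-wide average column length estimate. A tiny worked example:

// Tiny illustration of the fix: when merging per-partition string stats,
// the aggregate avgColLen should be the conservative (max) estimate, not the min.
public class AvgColLenMerge {
  public static void main(String[] args) {
    double[] perPartitionAvgColLen = {4.2, 9.7, 6.1};
    double aggregate = 0.0;
    for (double avg : perPartitionAvgColLen) {
      aggregate = Math.max(aggregate, avg); // was Math.min before HIVE-18528
    }
    System.out.println(aggregate); // 9.7: extrapolation now keeps the largest average
  }
}
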


[1/2] hive git commit: HIVE-17495: CachedStore: prewarm improvement (avoid multiple sql calls to read partition column stats), refactoring and caching some aggregate stats (Vaibhav Gumashta reviewed b

2018-01-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master fbb3ed15f -> 456a65180


http://git-wip-us.apache.org/repos/asf/hive/blob/456a6518/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
index d12cdc0..e6823d3 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import 
org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+import 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -43,27 +44,24 @@ public class LongColumnStatsAggregator extends 
ColumnStatsAggregator implements
   private static final Logger LOG = 
LoggerFactory.getLogger(LongColumnStatsAggregator.class);
 
   @Override
-  public ColumnStatisticsObj aggregate(String colName, List<String> partNames,
-      List<ColumnStatistics> css) throws MetaException {
+  public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
+      List<String> partNames, boolean areAllPartsFound) throws MetaException {
     ColumnStatisticsObj statsObj = null;
-
+    String colType = null;
+    String colName = null;
     // check if all the ColumnStatisticsObjs contain stats and all the ndv are
     // bitvectors
-    boolean doAllPartitionContainStats = partNames.size() == css.size();
-    LOG.debug("doAllPartitionContainStats for " + colName + " is " + doAllPartitionContainStats);
+    boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
     NumDistinctValueEstimator ndvEstimator = null;
-    String colType = null;
-    for (ColumnStatistics cs : css) {
-      if (cs.getStatsObjSize() != 1) {
-        throw new MetaException(
-            "The number of columns should be exactly one in aggrStats, but found "
-                + cs.getStatsObjSize());
-      }
-      ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
+    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+      ColumnStatisticsObj cso = csp.getColStatsObj();
       if (statsObj == null) {
+        colName = cso.getColName();
         colType = cso.getColType();
-        statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType, cso
-            .getStatsData().getSetField());
+        statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
+            cso.getStatsData().getSetField());
+        LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
+            doAllPartitionContainStats);
       }
       LongColumnStatsDataInspector longColumnStatsData =
           (LongColumnStatsDataInspector) cso.getStatsData().getLongStats();
@@ -91,13 +89,13 @@ public class LongColumnStatsAggregator extends ColumnStatsAggregator implements
     }
     LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
     ColumnStatisticsData columnStatisticsData = new ColumnStatisticsData();
-    if (doAllPartitionContainStats || css.size() < 2) {
+    if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
       LongColumnStatsDataInspector aggregateData = null;
       long lowerBound = 0;
       long higherBound = 0;
       double densityAvgSum = 0.0;
-      for (ColumnStatistics cs : css) {
-        ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
+      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+        ColumnStatisticsObj cso = csp.getColStatsObj();
         LongColumnStatsDataInspector newData =
             (LongColumnStatsDataInspector) cso.getStatsData().getLongStats();
         lowerBound = Math.max(lowerBound, newData.getNumDVs());
@@ -155,9 +153,9 @@ public class LongColumnStatsAggregator extends ColumnStatsAggregator implements
       if (ndvEstimator == null) {
         // if not every partition uses bitvector for ndv, we just fall back to
         // the traditional extrapolation methods.
-        for (ColumnStatistics cs : css) {
-          String partName = cs.getStatsDesc().getPartName();
-          ColumnStatisticsObj cso = cs.getStatsObjIterator().next();
+        for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+          ColumnStatisticsObj cso = csp.getColStatsObj();
+          String partName = 

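The lowerBound/higherBound arithmetic visible in this hunk bounds the aggregate NDV when per-partition bitvectors cannot be merged: the true column NDV is at least the largest per-partition NDV, and, as I read the surrounding code, higherBound accumulates the sum as the upper limit. A small standalone illustration with simplified names:

// Sketch of the bounds logic: across partitions, the true column NDV is at least
// the largest per-partition NDV and at most the sum of all per-partition NDVs.
public class NdvBounds {
  public static void main(String[] args) {
    long[] perPartitionNdv = {120, 300, 75};
    long lowerBound = 0;
    long higherBound = 0;
    for (long ndv : perPartitionNdv) {
      lowerBound = Math.max(lowerBound, ndv); // distinct values in one partition stay distinct overall
      higherBound += ndv;                     // at worst, no partition shares a value with another
    }
    System.out.println(lowerBound + " <= NDV <= " + higherBound); // 300 <= NDV <= 495
  }
}
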
[2/2] hive git commit: HIVE-17495: CachedStore: prewarm improvement (avoid multiple sql calls to read partition column stats), refactoring and caching some aggregate stats (Vaibhav Gumashta reviewed b

2018-01-17 Thread vgumashta
HIVE-17495: CachedStore: prewarm improvement (avoid multiple sql calls to read 
partition column stats), refactoring and caching some aggregate stats (Vaibhav 
Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/456a6518
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/456a6518
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/456a6518

Branch: refs/heads/master
Commit: 456a65180dcb84f69f26b4c9b9265165ad16dfe4
Parents: fbb3ed1
Author: Vaibhav Gumashta 
Authored: Wed Jan 17 09:59:02 2018 -0800
Committer: Vaibhav Gumashta 
Committed: Wed Jan 17 09:59:02 2018 -0800

--
 .../listener/DummyRawStoreFailEvent.java|  15 +-
 .../hive/metastore/MetaStoreDirectSql.java  |  95 ++---
 .../hadoop/hive/metastore/ObjectStore.java  |  24 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |  11 +-
 .../hadoop/hive/metastore/cache/CacheUtils.java |  12 +
 .../hive/metastore/cache/CachedStore.java   | 392 ++-
 .../hive/metastore/cache/SharedCache.java   | 167 +++-
 .../aggr/BinaryColumnStatsAggregator.java   |  23 +-
 .../aggr/BooleanColumnStatsAggregator.java  |  23 +-
 .../columnstats/aggr/ColumnStatsAggregator.java |   8 +-
 .../aggr/DateColumnStatsAggregator.java |  56 ++-
 .../aggr/DecimalColumnStatsAggregator.java  |  57 ++-
 .../aggr/DoubleColumnStatsAggregator.java   |  57 ++-
 .../aggr/LongColumnStatsAggregator.java |  56 +--
 .../aggr/StringColumnStatsAggregator.java   |  59 ++-
 .../hive/metastore/utils/MetaStoreUtils.java| 144 ---
 .../DummyRawStoreControlledCommit.java  |  16 +-
 .../DummyRawStoreForJdoConnection.java  |  16 +-
 18 files changed, 805 insertions(+), 426 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/456a6518/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
--
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 1fca332..bc9ef62 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -75,6 +74,7 @@ import 
org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
 import org.apache.hadoop.hive.metastore.api.WMPool;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
 import org.apache.thrift.TException;
 
 /**
@@ -976,12 +976,6 @@ public class DummyRawStoreFailEvent implements RawStore, 
Configurable {
   }
 
   @Override
-  public Map<String, List<ColumnStatistics>> getColStatsForTablePartitions(String dbName,
-      String tableName) throws MetaException, NoSuchObjectException {
-    return objectStore.getColStatsForTablePartitions(dbName, tableName);
-  }
-
-  @Override
   public String getMetastoreDbUuid() throws MetaException {
 throw new MetaException("getMetastoreDbUuid is not implemented");
   }
@@ -1092,4 +1086,11 @@ public class DummyRawStoreFailEvent implements RawStore, 
Configurable {
   String poolPath) throws NoSuchObjectException, 
InvalidOperationException, MetaException {
 objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, 
poolPath);
   }
+
+  @Override
+  public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String dbName)
+      throws MetaException, NoSuchObjectException {
+    // TODO Auto-generated method stub
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/456a6518/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
 

hive git commit: HIVE-17790: Export/Import: Bug while getting auth entities due to which we write partition info during compilation phase (Vaibhav Gumashta reviewed by Thejas Nair)

2017-10-12 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master c1a1d5960 -> 0a9fabbd0


HIVE-17790: Export/Import: Bug while getting auth entities due to which we 
write partition info during compilation phase (Vaibhav Gumashta reviewed by 
Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0a9fabbd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0a9fabbd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0a9fabbd

Branch: refs/heads/master
Commit: 0a9fabbd0b01179867042151da5a4409ad7d68d2
Parents: c1a1d59
Author: Vaibhav Gumashta 
Authored: Thu Oct 12 20:47:53 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Thu Oct 12 20:47:53 2017 -0700

--
 .../java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0a9fabbd/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index ab94ec5..a44f98f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -297,7 +297,6 @@ public class TableExport {
 throw new IllegalStateException("partitions cannot be null for 
partitionTable :"
 + tableSpec.tableName);
   }
-  new PartitionExport(paths, partitions, distCpDoAsUser, 
conf).write(replicationSpec);
   for (Partition partition : partitions) {
 authEntities.inputs.add(new ReadEntity(partition));
   }



hive git commit: HIVE-17649: Export/Import: Move export data write to a task (Vaibhav Gumashta reviewed by Thejas Nair)

2017-10-03 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master ae68d85ab -> 88ca553c4


HIVE-17649: Export/Import: Move export data write to a task (Vaibhav Gumashta 
reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/88ca553c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/88ca553c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/88ca553c

Branch: refs/heads/master
Commit: 88ca553c451d8d23778a912ec26b262eda402c68
Parents: ae68d85
Author: Vaibhav Gumashta 
Authored: Tue Oct 3 00:03:28 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Oct 3 00:04:28 2017 -0700

--
 .../apache/hadoop/hive/ql/exec/ExportTask.java  | 69 ++
 .../apache/hadoop/hive/ql/exec/TaskFactory.java |  2 +
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java  |  2 +-
 .../hive/ql/parse/ExportSemanticAnalyzer.java   | 20 -
 .../ql/parse/repl/dump/PartitionExport.java |  5 +-
 .../hive/ql/parse/repl/dump/TableExport.java| 77 ++--
 .../apache/hadoop/hive/ql/plan/ExportWork.java  | 73 +++
 .../authorization_uri_export.q.out  |  1 -
 .../exim_12_nonnative_export.q.out  |  6 +-
 9 files changed, 222 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/88ca553c/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
new file mode 100644
index 000..bb45f30
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
@@ -0,0 +1,69 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec;
+
+import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.repl.dump.TableExport;
+import org.apache.hadoop.hive.ql.plan.ExportWork;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+
+public class ExportTask extends Task<ExportWork> implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+  private Logger LOG = LoggerFactory.getLogger(ExportTask.class);
+
+  public ExportTask() {
+super();
+  }
+
+  @Override
+  public String getName() {
+return "EXPORT";
+  }
+
+  @Override
+  protected int execute(DriverContext driverContext) {
+    try {
+      // Also creates the root directory
+      TableExport.Paths exportPaths =
+          new TableExport.Paths(work.getAstRepresentationForErrorMsg(), work.getExportRootDir(),
+              conf, false);
+      Hive db = getHive();
+      LOG.debug("Exporting data to: {}", exportPaths.getExportRootDir());
+      new TableExport(exportPaths, work.getTableSpec(), work.getReplicationSpec(), db, null, conf)
+          .write();
+    } catch (Exception e) {
+      LOG.error("failed", e);
+      setException(e);
+      return 1;
+    }
+    return 0;
+  }
+
+  @Override
+  public StageType getType() {
+// TODO: Modify Thrift IDL to generate export stage if needed
+return StageType.REPL_DUMP;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/88ca553c/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index fe9b624..e9c69d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.ExplainSQRewriteWork;
 import 

hive git commit: HIVE-17625: Replication: update hive.repl.partitions.dump.parallelism to 100 (Vaibhav Gumashta reviewed by Thejas Nair)

2017-09-29 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master b8aad3602 -> 11beadff3


HIVE-17625: Replication: update hive.repl.partitions.dump.parallelism to 100 
(Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11beadff
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11beadff
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11beadff

Branch: refs/heads/master
Commit: 11beadff376dbb30058dba1672909ddb14b47df1
Parents: b8aad36
Author: Vaibhav Gumashta 
Authored: Fri Sep 29 01:40:08 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Sep 29 01:40:08 2017 -0700

--
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/11beadff/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 5bec15e..cd6998e 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -447,7 +447,7 @@ public class HiveConf extends Configuration {
 + "dynamically generating the next set of tasks. The number is 
approximate as Hive \n"
 + "will stop at a slightly higher number, the reason being some 
events might lead to a \n"
 + "task increment that would cross the specified limit."),
-    REPL_PARTITIONS_DUMP_PARALLELISM("hive.repl.partitions.dump.parallelism",5,
+    REPL_PARTITIONS_DUMP_PARALLELISM("hive.repl.partitions.dump.parallelism",100,
         "Number of threads that will be used to dump partition data information during repl dump."),
 REPL_DUMPDIR_CLEAN_FREQ("hive.repl.dumpdir.clean.freq", "0s",
 new TimeValidator(TimeUnit.SECONDS),



[1/2] hive git commit: HIVE-13989: Extended ACLs are not handled according to specification (Chris Drome reviewed by Vaibhav Gumashta)

2017-09-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-2.2 b2e7d5ef6 -> a549696a2


http://git-wip-us.apache.org/repos/asf/hive/blob/a549696a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 7b6a9bd..9613cfb 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -60,26 +60,63 @@ public class HdfsUtils {
 
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       FileSystem fs, Path target, boolean recursion) throws IOException {
-    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
+    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true);
   }
 
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException {
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true);
+  }
+
+  public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+      String targetGroup, FileSystem fs, Path target, boolean recursion, boolean isDir) throws IOException {
     FileStatus fStatus= sourceStatus.getFileStatus();
     String group = fStatus.getGroup();
     boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
     FsPermission sourcePerm = fStatus.getPermission();
     List<AclEntry> aclEntries = null;
     if (aclEnabled) {
-      if (sourceStatus.getAclEntries() != null) {
+      if (sourceStatus.getAclEntries() != null && !sourceStatus.getAclEntries().isEmpty()) {
         LOG.trace(sourceStatus.aclStatus.toString());
-        aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
-        removeBaseAclEntries(aclEntries);
 
-        //the ACL api's also expect the tradition user/group/other permission in the form of ACL
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+        List<AclEntry> defaults = extractDefaultAcls(sourceStatus.getAclEntries());
+        if (!defaults.isEmpty()) {
+          // Generate child ACLs based on parent DEFAULTs.
+          aclEntries = new ArrayList<AclEntry>(defaults.size() * 2);
+
+          // All ACCESS ACLs are derived from the DEFAULT ACLs of the parent.
+          // All DEFAULT ACLs of the parent are inherited by the child.
+          // If DEFAULT ACLs exist, it should include DEFAULTs for USER, OTHER, and MASK.
+          for (AclEntry acl : defaults) {
+            // OTHER permissions are not inherited by the child.
+            if (acl.getType() == AclEntryType.OTHER) {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, FsAction.NONE));
+            } else {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, acl.getType(), acl.getName(), acl.getPermission()));
+            }
+          }
+
+          // Add DEFAULTs for directories only; adding DEFAULTs for files throws an exception.
+          if (isDir) {
+            aclEntries.addAll(defaults);
+          }
+        } else {
+          // Parent has no DEFAULTs, hence child inherits no ACLs.
+          // Set basic permissions only.
+          FsAction groupAction = null;
+
+          for (AclEntry acl : sourceStatus.getAclEntries()) {
+            if (acl.getType() == AclEntryType.GROUP) {
+              groupAction = acl.getPermission();
+              break;
+            }
+          }
+
+          aclEntries = new ArrayList<AclEntry>(3);
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, groupAction));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, FsAction.NONE));
+        }
       }
     }
 
@@ -93,19 +130,16 @@ public class HdfsUtils {
     if (group != null && !group.isEmpty()) {
       run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
     }
-    if (aclEnabled) {
-      if (null != aclEntries) {
-        //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless.
-        try {
-          //construct the -setfacl command
-          String aclEntry = Joiner.on(",").join(aclEntries);
-          run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
-
-

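The inheritance rules spelled out in the comments above reduce to a short derivation. Below is a hedged sketch using Hadoop's AclEntry builder; deriveChildAcls is an illustrative helper, not the patch's API:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class ChildAclDerivation {
  // parentDefaults: the parent directory's DEFAULT-scope entries.
  static List<AclEntry> deriveChildAcls(List<AclEntry> parentDefaults, boolean isDir) {
    List<AclEntry> child = new ArrayList<>(parentDefaults.size() * 2);
    for (AclEntry acl : parentDefaults) {
      // Each parent DEFAULT becomes a child ACCESS entry, except OTHER, which is masked off.
      child.add(new AclEntry.Builder()
          .setScope(AclEntryScope.ACCESS)
          .setType(acl.getType())
          .setName(acl.getName())
          .setPermission(acl.getType() == AclEntryType.OTHER ? FsAction.NONE : acl.getPermission())
          .build());
    }
    if (isDir) {
      child.addAll(parentDefaults); // files cannot carry DEFAULT entries
    }
    return child;
  }
}
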
[2/2] hive git commit: HIVE-13989: Extended ACLs are not handled according to specification (Chris Drome reviewed by Vaibhav Gumashta)

2017-09-09 Thread vgumashta
HIVE-13989: Extended ACLs are not handled according to specification (Chris 
Drome reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a549696a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a549696a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a549696a

Branch: refs/heads/branch-2.2
Commit: a549696a2d485eb01638e06969602851140f9199
Parents: b2e7d5e
Author: Vaibhav Gumashta 
Authored: Sat Sep 9 12:23:49 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Sep 9 12:23:49 2017 -0700

--
 .../mapreduce/FileOutputCommitterContainer.java | 147 +++-
 .../org/apache/hive/hcatalog/MiniCluster.java   |  16 +-
 itests/hcatalog-unit/pom.xml|   6 +
 .../hive/hcatalog/pig/TestExtendedAcls.java | 748 +++
 .../hive/ql/security/TestExtendedAcls.java  | 228 +-
 .../hive/ql/security/FolderPermissionBase.java  | 199 +++--
 .../hive/ql/security/TestFolderPermissions.java |   5 +
 .../apache/hadoop/hive/ql/metadata/Hive.java|   4 +-
 .../org/apache/hadoop/hive/io/HdfsUtils.java| 176 +++--
 9 files changed, 1351 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a549696a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
--
diff --git 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 9056f11..4c11181 100644
--- 
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -32,6 +32,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -64,6 +69,9 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+
 /**
  * Part of the FileOutput*Container classes
  * See {@link FileOutputFormatContainer} for more information
@@ -334,7 +342,7 @@ class FileOutputCommitterContainer extends 
OutputCommitterContainer {
 String partLocnRoot, String dynPartPath, Map partKVs,
 HCatSchema outputSchema, Map params,
 Table table, FileSystem fs,
-        String grpName, FsPermission perms) throws IOException {
+        String grpName, FsPermission perms, List<AclEntry> acls) throws IOException {
 
 Partition partition = new Partition();
 partition.setDbName(table.getDbName());
@@ -371,7 +379,7 @@ class FileOutputCommitterContainer extends 
OutputCommitterContainer {
   for (FieldSchema partKey : table.getPartitionKeys()) {
 if (i++ != 0) {
   fs.mkdirs(partPath); // Attempt to make the path in case it does not 
exist before we check
-  applyGroupAndPerms(fs, partPath, perms, grpName, false);
+  applyGroupAndPerms(fs, partPath, perms, acls, grpName, false);
 }
 partPath = constructPartialPartPath(partPath, 
partKey.getName().toLowerCase(), partKVs);
   }
@@ -381,7 +389,7 @@ class FileOutputCommitterContainer extends 
OutputCommitterContainer {
 // Need not bother in case of HDFS as permission is taken care of by 
setting UMask
 fs.mkdirs(partPath); // Attempt to make the path in case it does not exist 
before we check
 if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) 
{
-  applyGroupAndPerms(fs, partPath, perms, grpName, true);
+  applyGroupAndPerms(fs, partPath, perms, acls, grpName, true);
 }
 
 // Set the location in the StorageDescriptor
@@ -400,21 +408,29 @@ class FileOutputCommitterContainer extends 
OutputCommitterContainer {
 return partition;
   }
 
-  private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
-      String group, boolean recursive)
+  private void applyGroupAndPerms(FileSystem fs, Path path, FsPermission permission,
+      List<AclEntry> acls, String group, boolean recursive)
     throws 

[2/2] hive git commit: HIVE-13989: Extended ACLs are not handled according to specification (Chris Drome reviewed by Vaibhav Gumashta)

2017-09-09 Thread vgumashta
HIVE-13989: Extended ACLs are not handled according to specification (Chris 
Drome reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/988c491d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/988c491d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/988c491d

Branch: refs/heads/branch-2
Commit: 988c491dd3d22ace3d34635b9a8d53abc4dbf1c7
Parents: b3a6e52
Author: Vaibhav Gumashta 
Authored: Sat Sep 9 12:15:53 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Sep 9 12:15:53 2017 -0700

--
 .../apache/hadoop/hive/common/FileUtils.java|   7 +-
 .../org/apache/hadoop/hive/io/HdfsUtils.java| 135 ++--
 .../apache/hadoop/hive/io/TestHdfsUtils.java|   9 +-
 .../mapreduce/FileOutputCommitterContainer.java | 147 +++-
 .../org/apache/hive/hcatalog/MiniCluster.java   |  16 +-
 itests/hcatalog-unit/pom.xml|   6 +
 .../hive/hcatalog/pig/TestExtendedAcls.java | 748 +++
 .../hive/ql/security/TestExtendedAcls.java  | 228 +-
 .../hive/ql/security/FolderPermissionBase.java  | 199 +++--
 .../hive/ql/security/TestFolderPermissions.java |   5 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |   2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|   6 +-
 12 files changed, 1349 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/988c491d/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 8ed8cc4..2b7a57b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -1023,6 +1023,11 @@ public final class FileUtils {
 
   public static void inheritPerms(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, String targetGroup,
       FileSystem fs, Path target, boolean recursive) {
-    HdfsUtils.setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursive);
+    inheritPerms(conf, sourceStatus, targetGroup, fs, target, recursive, true);
+  }
+
+  public static void inheritPerms(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, String targetGroup,
+      FileSystem fs, Path target, boolean recursive, boolean isDir) {
+    HdfsUtils.setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursive, isDir);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/988c491d/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java 
b/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 1b57184..16fc96e 100644
--- a/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -77,7 +77,7 @@ public class HdfsUtils {
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       FileSystem fs, Path target, boolean recursion) {
     if (StorageUtils.shouldSetPerms(conf, fs)) {
-      setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
+      setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true);
     }
   }
 
@@ -95,14 +95,25 @@ public class HdfsUtils {
    */
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       String targetGroup, FileSystem fs, Path target, boolean recursion) {
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true);
+  }
+
+  public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+      String targetGroup, FileSystem fs, Path target, boolean recursion, boolean isDir) {
     if (StorageUtils.shouldSetPerms(conf, fs)) {
-      setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null);
+      setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null, isDir);
     }
   }
 
   @VisibleForTesting
   static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) {
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, fsShell, true);
+  }
+
+  @VisibleForTesting
+  static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+      String 

[1/2] hive git commit: HIVE-13989: Extended ACLs are not handled according to specification (Chris Drome reviewed by Vaibhav Gumashta)

2017-09-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-2 b3a6e524a -> 988c491dd


http://git-wip-us.apache.org/repos/asf/hive/blob/988c491d/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
index 2ae9cc0..bf6b7e1 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
@@ -60,11 +60,11 @@ public abstract class FolderPermissionBase {
 }
   };
 
-
   public abstract void setPermission(String locn, int permIndex) throws 
Exception;
 
   public abstract void verifyPermission(String locn, int permIndex) throws 
Exception;
 
+  public abstract void verifyInheritedPermission(String locn, int permIndex) 
throws Exception;
 
   public void setPermission(String locn) throws Exception {
 setPermission(locn, 0);
@@ -74,6 +74,9 @@ public abstract class FolderPermissionBase {
 verifyPermission(locn, 0);
   }
 
+  public void verifyInheritedPermission(String locn) throws Exception {
+verifyInheritedPermission(locn, 0);
+  }
 
   public static void baseSetup() throws Exception {
 MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, 
null);
@@ -138,7 +141,7 @@ public abstract class FolderPermissionBase {
 Assert.assertEquals(0,ret.getResponseCode());
 
 assertExistence(warehouseDir + "/" + testDb + ".db");
-verifyPermission(warehouseDir + "/" + testDb + ".db");
+verifyInheritedPermission(warehouseDir + "/" + testDb + ".db");
 
 ret = driver.run("USE " + testDb);
 Assert.assertEquals(0,ret.getResponseCode());
@@ -146,22 +149,28 @@ public abstract class FolderPermissionBase {
 ret = driver.run("CREATE TABLE " + tableName + " (key string, value 
string)");
 Assert.assertEquals(0,ret.getResponseCode());
 
-verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + 
tableName);
 
 ret = driver.run("insert into table " + tableName + " select key,value 
from default.mysrc");
 Assert.assertEquals(0,ret.getResponseCode());
 
 assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
-verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + 
tableName);
 
 Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + 
tableName).size() > 0);
 for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + 
tableName)) {
-  verifyPermission(child);
+  verifyInheritedPermission(child);
 }
 
+ret = driver.run("DROP TABLE " + tableName);
+Assert.assertEquals(0,ret.getResponseCode());
+
 ret = driver.run("USE default");
 Assert.assertEquals(0,ret.getResponseCode());
 
+ret = driver.run("DROP DATABASE " + testDb);
+Assert.assertEquals(0,ret.getResponseCode());
+
 //cleanup after the test.
 fs.delete(warehouseDir, true);
 fs.mkdirs(warehouseDir);
@@ -186,21 +195,27 @@ public abstract class FolderPermissionBase {
 ret = driver.run("CREATE TABLE " + tableName + " (key string, value 
string)");
 Assert.assertEquals(0,ret.getResponseCode());
 
-verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + 
tableName);
 
 ret = driver.run("insert into table " + tableName + " select key,value 
from default.mysrc");
 Assert.assertEquals(0,ret.getResponseCode());
 
 assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
-verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + 
tableName);
 
 Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + 
tableName).size() > 0);
 for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + 
tableName)) {
-  verifyPermission(child);
+  verifyInheritedPermission(child);
 }
 
+ret = driver.run("DROP TABLE " + tableName);
+Assert.assertEquals(0,ret.getResponseCode());
+
 ret = driver.run("USE default");
 Assert.assertEquals(0,ret.getResponseCode());
+
+ret = driver.run("DROP DATABASE " + testDb);
+Assert.assertEquals(0,ret.getResponseCode());
   }
 
 
@@ -223,7 +238,7 @@ public abstract class FolderPermissionBase {
 verifyPermission(warehouseDir + "/" + tableName);
 Assert.assertTrue(listStatus(tableLoc).size() > 0);
 for (String child : listStatus(tableLoc)) {
-  verifyPermission(child);
+  verifyInheritedPermission(child);
 }
 
 //case1B: insert 

hive git commit: HIVE-17352: HiveSever2 error with Illegal Operation state transition from CLOSED to FINISHED (Deepak Jaiswal reviewed by Vaibhav Gumashta)

2017-08-22 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master ff30a1ebf -> f0b0cc268


HIVE-17352: HiveSever2 error with Illegal Operation state transition from 
CLOSED to FINISHED (Deepak Jaiswal reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f0b0cc26
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f0b0cc26
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f0b0cc26

Branch: refs/heads/master
Commit: f0b0cc268e5fcb59391bbf4f4773f6f92aed6dab
Parents: ff30a1e
Author: Vaibhav Gumashta 
Authored: Tue Aug 22 11:34:04 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Aug 22 11:34:04 2017 -0700

--
 .../org/apache/hive/service/cli/operation/SQLOperation.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f0b0cc26/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
--
diff --git 
a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java 
b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 1a2be8b..773dd51 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -257,11 +257,12 @@ public class SQLOperation extends 
ExecuteStatementOperation {
   /**
* If the operation was cancelled by another thread, or the execution 
timed out, Driver#run
* may return a non-zero response code. We will simply return if the 
operation state is
-   * CANCELED, TIMEDOUT or CLOSED, otherwise throw an exception
+   * CANCELED, TIMEDOUT, CLOSED or FINISHED, otherwise throw an exception
*/
   if ((getStatus().getState() == OperationState.CANCELED)
   || (getStatus().getState() == OperationState.TIMEDOUT)
-  || (getStatus().getState() == OperationState.CLOSED)) {
+  || (getStatus().getState() == OperationState.CLOSED)
+  || (getStatus().getState() == OperationState.FINISHED)) {
 LOG.warn("Ignore exception in terminal state", e);
 return;
   }



hive git commit: HIVE-4577: hive CLI can't handle hadoop dfs command with space and quotes (Bing Li reviewed by Vaibhav Gumashta)

2017-07-14 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 4af462495 -> adca35a46


HIVE-4577: hive CLI can't handle hadoop dfs command with space and quotes (Bing 
Li reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/adca35a4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/adca35a4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/adca35a4

Branch: refs/heads/master
Commit: adca35a469e31f496a5001e88f265a9145bfbcdf
Parents: 4af4624
Author: Vaibhav Gumashta 
Authored: Fri Jul 14 10:40:20 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Jul 14 10:40:20 2017 -0700

--
 .../hadoop/hive/ql/processors/DfsProcessor.java | 60 +++-
 ql/src/test/queries/clientpositive/dfscmd.q |  7 +++
 ql/src/test/results/clientpositive/dfscmd.q.out |  1 +
 .../results/clientpositive/perf/query14.q.out   |  2 +-
 4 files changed, 68 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/adca35a4/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java
index 19f5bde..87a0c5a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.processors;
 
 import java.io.PrintStream;
 import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.Map;
 
 import org.slf4j.Logger;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import 
org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
@@ -74,7 +76,7 @@ public class DfsProcessor implements CommandProcessor {
 }
   }).substitute(ss.getConf(), command);
 
-  String[] tokens = command.split("\\s+");
+  String[] tokens = splitCmd(command);
   CommandProcessorResponse authErrResp =
   CommandUtil.authorizeCommand(ss, HiveOperationType.DFS, 
Arrays.asList(tokens));
   if(authErrResp != null){
@@ -104,4 +106,60 @@ public class DfsProcessor implements CommandProcessor {
 }
   }
 
+  private String[] splitCmd(String command) throws CommandNeedRetryException {
+
+    ArrayList<String> paras = new ArrayList<String>();
+int cmdLng = command.length();
+char y = 0;
+int start = 0;
+
+for (int i = 0; i < cmdLng; i++) {
+  char x = command.charAt(i);
+
+  switch(x) {
+case ' ':
+  if ((int) y == 0) {
+String str = command.substring(start, i).trim();
+if (!str.equals("")) {
+  paras.add(str);
+  start = i + 1;
+}
+  }
+  break;
+case '"':
+  if ((int) y == 0) {
+y = x;
+start = i + 1;
+  } else if ('"' == y) {
+paras.add(command.substring(start, i).trim());
+y = 0;
+start = i + 1;
+  }
+  break;
+case '\'':
+  if ((int) y == 0) {
+y = x;
+start = i + 1;
+  } else if ('\'' == y) {
+paras.add(command.substring(start, i).trim());
+y = 0;
+start = i + 1;
+  }
+  break;
+default:
+  if (i == cmdLng-1 && start < cmdLng) {
+paras.add(command.substring(start, cmdLng).trim());
+  }
+  break;
+  }
+}
+
+if ((int) y != 0) {
+  console.printError("Syntax error on hadoop options: dfs " + command);
+  throw new CommandNeedRetryException();
+}
+
+return paras.toArray(new String[paras.size()]);
+  }
+
 }

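For contrast with the old behavior, the example below shows why the naive whitespace split had to go; the expected tokens are what splitCmd above now produces for a quoted argument:

// Hedged illustration of the quote handling above (splitCmd itself is private).
public class SplitCmdExample {
  public static void main(String[] args) {
    // Input as typed at the CLI:        dfs -mkdir "bei jing"
    // Expected tokens after splitCmd:   ["-mkdir", "bei jing"]
    String command = "-mkdir \"bei jing\"";
    // Before HIVE-4577, command.split("\\s+") produced ["-mkdir", "\"bei", "jing\""],
    // so hadoop fs saw two directory names instead of one quoted name.
    for (String broken : command.split("\\s+")) {
      System.out.println("naive token: " + broken);
    }
  }
}
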
http://git-wip-us.apache.org/repos/asf/hive/blob/adca35a4/ql/src/test/queries/clientpositive/dfscmd.q
--
diff --git a/ql/src/test/queries/clientpositive/dfscmd.q 
b/ql/src/test/queries/clientpositive/dfscmd.q
new file mode 100644
index 000..0789336
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dfscmd.q
@@ -0,0 +1,7 @@
+dfs -mkdir "hello";
+dfs -mkdir 'world';
+dfs -mkdir "bei jing";
+dfs -rmr 'hello';
+dfs -rmr "world";
+dfs -rmr 'bei jing';
+


hive git commit: HIVE-16554: ACID: Make HouseKeeperService threads daemon (Vaibhav Gumashta reviewed by Eugene Koifman)

2017-05-25 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 3c1b354ea -> 823f01c38


HIVE-16554: ACID: Make HouseKeeperService threads daemon (Vaibhav Gumashta 
reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/823f01c3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/823f01c3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/823f01c3

Branch: refs/heads/master
Commit: 823f01c386989eed2b5c60492f16140521b583a8
Parents: 3c1b354
Author: Vaibhav Gumashta 
Authored: Thu May 25 00:53:09 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Thu May 25 00:53:09 2017 -0700

--
 .../hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/823f01c3/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
index 0b7332c..0aa160c 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/HouseKeeperServiceBase.java
@@ -48,7 +48,11 @@ public abstract class HouseKeeperServiceBase implements HouseKeeperService {
       private final AtomicInteger threadCounter = new AtomicInteger();
       @Override
       public Thread newThread(Runnable r) {
-        return new Thread(r, HouseKeeperServiceBase.this.getClass().getName() + "-" + threadCounter.getAndIncrement());
+        Thread t =
+            new Thread(r, HouseKeeperServiceBase.this.getClass().getName() + "-"
+                + threadCounter.getAndIncrement());
+        t.setDaemon(true);
+        return t;
       }
     });
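
The whole fix is t.setDaemon(true): housekeeping threads created by this factory no longer count as foreground threads, so an idle compactor pool cannot keep the JVM from exiting. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not the committed Hive code):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class DaemonThreadFactoryDemo {
  public static void main(String[] args) throws InterruptedException {
    final AtomicInteger threadCounter = new AtomicInteger();
    ThreadFactory factory = new ThreadFactory() {
      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r, "housekeeper-" + threadCounter.getAndIncrement());
        t.setDaemon(true);  // without this line, the pool thread blocks JVM exit
        return t;
      }
    };
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1, factory);
    pool.scheduleAtFixedRate(
        () -> System.out.println("housekeeping tick"), 0, 100, TimeUnit.MILLISECONDS);
    Thread.sleep(250);
    // No shutdown() call: the JVM still terminates when main returns,
    // because the only pool thread is a daemon.
  }
}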
 



[2/2] hive git commit: HIVE-16579: CachedStore: improvements to partition col stats caching and cache column stats for unpartitioned table (Daniel Dai, Thejas Nair, Vaibhav Gumashta reviewed by Daniel

2017-05-22 Thread vgumashta
HIVE-16579: CachedStore: improvements to partition col stats caching and cache 
column stats for unpartitioned table (Daniel Dai, Thejas Nair, Vaibhav Gumashta 
reviewed by Daniel Dai, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d85beaa9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d85beaa9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d85beaa9

Branch: refs/heads/master
Commit: d85beaa99ba349d9334d3d96abb6e89c94db8481
Parents: 952fe6e
Author: Vaibhav Gumashta 
Authored: Mon May 22 15:52:58 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Mon May 22 15:52:58 2017 -0700

--
 .../listener/DummyRawStoreFailEvent.java|   4 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java|   2 +-
 .../hive/metastore/MetaStoreDirectSql.java  |  73 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |  11 +-
 .../hadoop/hive/metastore/ObjectStore.java  |  19 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |   8 +-
 .../hive/metastore/StatObjectConverter.java | 148 +++
 .../hadoop/hive/metastore/cache/CacheUtils.java |  31 +
 .../hive/metastore/cache/CachedStore.java   | 943 ---
 .../hive/metastore/cache/SharedCache.java   | 293 +-
 .../hadoop/hive/metastore/hbase/HBaseStore.java |   2 +-
 .../stats/merge/ColumnStatsMergerFactory.java   |  18 +-
 .../stats/merge/DateColumnStatsMerger.java  |  55 ++
 .../DummyRawStoreControlledCommit.java  |   2 +-
 .../DummyRawStoreForJdoConnection.java  |   2 +-
 .../hive/metastore/cache/TestCachedStore.java   | 450 -
 16 files changed, 1637 insertions(+), 424 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d85beaa9/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
--
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 91a3a38..3dc63bd 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -914,9 +914,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public Map<String, List<ColumnStatisticsObj>> getAggrColStatsForTablePartitions(String dbName,
+  public Map<String, List<ColumnStatisticsObj>> getColStatsForTablePartitions(String dbName,
       String tableName) throws MetaException, NoSuchObjectException {
-    return objectStore.getAggrColStatsForTablePartitions(dbName, tableName);
+    return objectStore.getColStatsForTablePartitions(dbName, tableName);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/d85beaa9/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index d296851..111cc11 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -350,7 +350,7 @@ public class QTestUtil {
     if (!useHBaseMetastore) {
       // Plug verifying metastore in for testing DirectSQL.
       conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
-        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
+          "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
     } else {
       conf.setVar(ConfVars.METASTORE_RAW_STORE_IMPL, HBaseStore.class.getName());
       conf.setBoolVar(ConfVars.METASTORE_FASTPATH, true);

http://git-wip-us.apache.org/repos/asf/hive/blob/d85beaa9/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index b96c27e..df73693 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -1208,7 +1208,9 @@ class MetaStoreDirectSql {
   }
 };
     List<Object[]> list = runBatched(colNames, b);
-    if (list.isEmpty()) return null;
+    if (list.isEmpty()) {
+      return null;
+    }
     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName);
     ColumnStatistics 

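The runBatched(colNames, b) call visible above processes a long column-name list in fixed-size chunks and concatenates the partial results. A generic sketch of that batching pattern, assuming (the hunk is truncated here) a callback that maps each sublist to its partial result:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class RunBatchedDemo {

  // Apply 'batch' to fixed-size sublists of 'input' and concatenate the results.
  static <I, R> List<R> runBatched(int batchSize, List<I> input,
      Function<List<I>, List<R>> batch) {
    List<R> result = new ArrayList<R>();
    for (int from = 0; from < input.size(); from += batchSize) {
      int to = Math.min(from + batchSize, input.size());
      result.addAll(batch.apply(input.subList(from, to)));
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> colNames = Arrays.asList("c1", "c2", "c3", "c4", "c5");
    // Stand-in for the per-batch SQL query: tag each column name.
    List<String> stats = runBatched(2, colNames, cols -> {
      List<String> out = new ArrayList<String>();
      for (String c : cols) {
        out.add("stats(" + c + ")");
      }
      return out;
    });
    System.out.println(stats);  // [stats(c1), stats(c2), stats(c3), stats(c4), stats(c5)]
  }
}
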
[1/2] hive git commit: HIVE-16579: CachedStore: improvements to partition col stats caching and cache column stats for unpartitioned table (Daniel Dai, Thejas Nair, Vaibhav Gumashta reviewed by Daniel

2017-05-22 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 952fe6e17 -> d85beaa99


http://git-wip-us.apache.org/repos/asf/hive/blob/d85beaa9/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 7beee42..6b6355b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -21,14 +21,18 @@ import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.TreeMap;
 
-import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.StatObjectConverter;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -38,17 +42,26 @@ import org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper
 import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper;
 import org.apache.hadoop.hive.metastore.hbase.HBaseUtils;
 import org.apache.hive.common.util.HiveStringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
 public class SharedCache {
   private static Map<String, Database> databaseCache = new TreeMap<String, Database>();
   private static Map<String, TableWrapper> tableCache = new TreeMap<String, TableWrapper>();
-  private static Map<String, PartitionWrapper> partitionCache = new TreeMap<String, PartitionWrapper>();
-  private static Map<String, ColumnStatisticsObj> partitionColStatsCache = new TreeMap<String, ColumnStatisticsObj>();
-  private static Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache = new HashMap<ByteArrayWrapper, StorageDescriptorWrapper>();
+  private static Map<String, PartitionWrapper> partitionCache =
+      new TreeMap<String, PartitionWrapper>();
+  private static Map<String, ColumnStatisticsObj> partitionColStatsCache =
+      new TreeMap<String, ColumnStatisticsObj>();
+  private static Map<String, ColumnStatisticsObj> tableColStatsCache =
+      new TreeMap<String, ColumnStatisticsObj>();
+  private static Map<ByteArrayWrapper, StorageDescriptorWrapper> sdCache =
+      new HashMap<ByteArrayWrapper, StorageDescriptorWrapper>();
   private static MessageDigest md;
 
+  static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName());
+
   static {
     try {
       md = MessageDigest.getInstance("MD5");
@@ -97,11 +110,13 @@ public class SharedCache {
     Table tblCopy = tbl.deepCopy();
     tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(dbName));
     tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblName));
-    for (FieldSchema fs : tblCopy.getPartitionKeys()) {
-      fs.setName(HiveStringUtils.normalizeIdentifier(fs.getName()));
+    if (tblCopy.getPartitionKeys() != null) {
+      for (FieldSchema fs : tblCopy.getPartitionKeys()) {
+        fs.setName(HiveStringUtils.normalizeIdentifier(fs.getName()));
+      }
     }
     TableWrapper wrapper;
-    if (tbl.getSd()!=null) {
+    if (tbl.getSd() != null) {
       byte[] sdHash = HBaseUtils.hashStorageDescriptor(tbl.getSd(), md);
       StorageDescriptor sd = tbl.getSd();
       increSd(sd, sdHash);
@@ -121,10 +136,54 @@
     }
   }
 
+  public static synchronized ColumnStatisticsObj getCachedTableColStats(String colStatsCacheKey) {
+    return tableColStatsCache.get(colStatsCacheKey);
+  }
+
+  public static synchronized void removeTableColStatsFromCache(String dbName, String tblName) {
+    String partialKey = CacheUtils.buildKeyWithDelimit(dbName, tblName);
+    Iterator<Entry<String, ColumnStatisticsObj>> iterator =
+        tableColStatsCache.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<String, ColumnStatisticsObj> entry = iterator.next();
+      String key = entry.getKey();
+      if (key.toLowerCase().startsWith(partialKey.toLowerCase())) {
+        iterator.remove();
+      }
+    }
+  }
+
+  public static synchronized void removeTableColStatsFromCache(String dbName, String tblName,
+      String colName) {
+    tableColStatsCache.remove(CacheUtils.buildKey(dbName, tblName, colName));
+  }
+
+  public static synchronized void 

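The removeTableColStatsFromCache() overloads above rely on the cache's key scheme: a per-column key built from database, table, and column names, so invalidating every column of a table is a scan for keys sharing the db/table prefix. A runnable sketch of that pattern follows; the "#" delimiter and exact key layout are assumptions for illustration, not the actual CacheUtils encoding.

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class PrefixEvictionDemo {
  public static void main(String[] args) {
    Map<String, Long> colStats = new TreeMap<String, Long>();
    colStats.put("db1#tbl1#col1", 10L);
    colStats.put("db1#tbl1#col2", 20L);
    colStats.put("db1#tbl2#col1", 30L);

    String partialKey = "db1#tbl1#";  // analogue of buildKeyWithDelimit(db, table)
    Iterator<Map.Entry<String, Long>> it = colStats.entrySet().iterator();
    while (it.hasNext()) {
      if (it.next().getKey().startsWith(partialKey)) {
        it.remove();  // evict every column's stats for db1.tbl1
      }
    }
    System.out.println(colStats);  // {db1#tbl2#col1=30}
  }
}
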
hive git commit: HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)

2017-04-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-2.3 240558258 -> ee57fa1c5


HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ee57fa1c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ee57fa1c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ee57fa1c

Branch: refs/heads/branch-2.3
Commit: ee57fa1c5c44839dba92cc2ec237d97aee3496c8
Parents: 2405582
Author: Vaibhav Gumashta 
Authored: Fri Apr 21 11:14:09 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 21 11:16:16 2017 -0700

--
 jdbc/pom.xml | 13 +
 1 file changed, 1 insertion(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ee57fa1c/jdbc/pom.xml
--
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 83c2734..1a818c4 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -180,11 +180,7 @@
         <artifactSet>
           <excludes>
             <exclude>org.apache.commons:commons-compress</exclude>
-            <exclude>org.apache.hadoop:hadoop-yarn*</exclude>
-            <exclude>org.apache.hadoop:hadoop-mapreduce*</exclude>
-            <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
-            <exclude>org.apache.hadoop:hadoop-client</exclude>
-            <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+            <exclude>org.apache.hadoop:*</exclude>
             <exclude>org.apache.hive:hive-vector-code-gen</exclude>
             <exclude>org.apache.ant:*</exclude>
             <exclude>junit:*</exclude>
@@ -292,13 +288,6 @@
               <shadedPattern>org.apache.hive.com.facebook</shadedPattern>
             </relocation>
             <relocation>
-              <pattern>org.apache.hadoop</pattern>
-              <shadedPattern>org.apache.hive.org.apache.hadoop</shadedPattern>
-              <excludes>
-                <exclude>org.apache.hadoop.security.*</exclude>
-              </excludes>
-            </relocation>
-            <relocation>
               <pattern>org.apache.zookeeper</pattern>
               <shadedPattern>org.apache.hive.org.apache.zookeeper</shadedPattern>
             </relocation>



hive git commit: HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)

2017-04-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-2 c089e9d45 -> ca29a7c1c


HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ca29a7c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ca29a7c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ca29a7c1

Branch: refs/heads/branch-2
Commit: ca29a7c1c245d7ac566dbaad93d4434bfa05b8dc
Parents: c089e9d
Author: Vaibhav Gumashta 
Authored: Fri Apr 21 11:14:09 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 21 11:14:53 2017 -0700

--
 jdbc/pom.xml | 13 +
 1 file changed, 1 insertion(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ca29a7c1/jdbc/pom.xml
--
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 83c2734..1a818c4 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -180,11 +180,7 @@
         <artifactSet>
           <excludes>
             <exclude>org.apache.commons:commons-compress</exclude>
-            <exclude>org.apache.hadoop:hadoop-yarn*</exclude>
-            <exclude>org.apache.hadoop:hadoop-mapreduce*</exclude>
-            <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
-            <exclude>org.apache.hadoop:hadoop-client</exclude>
-            <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+            <exclude>org.apache.hadoop:*</exclude>
             <exclude>org.apache.hive:hive-vector-code-gen</exclude>
             <exclude>org.apache.ant:*</exclude>
             <exclude>junit:*</exclude>
@@ -292,13 +288,6 @@
               <shadedPattern>org.apache.hive.com.facebook</shadedPattern>
             </relocation>
             <relocation>
-              <pattern>org.apache.hadoop</pattern>
-              <shadedPattern>org.apache.hive.org.apache.hadoop</shadedPattern>
-              <excludes>
-                <exclude>org.apache.hadoop.security.*</exclude>
-              </excludes>
-            </relocation>
-            <relocation>
               <pattern>org.apache.zookeeper</pattern>
               <shadedPattern>org.apache.hive.org.apache.zookeeper</shadedPattern>
             </relocation>



hive git commit: HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)

2017-04-21 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master a698bb5f8 -> 17fcac09a


HIVE-16419: Exclude hadoop related classes for JDBC standalone jar (Tao Li reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/17fcac09
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/17fcac09
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/17fcac09

Branch: refs/heads/master
Commit: 17fcac09a891b79ad21c356f8ced3bb579c86472
Parents: a698bb5
Author: Vaibhav Gumashta 
Authored: Fri Apr 21 11:14:09 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 21 11:14:09 2017 -0700

--
 jdbc/pom.xml | 13 +
 1 file changed, 1 insertion(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/17fcac09/jdbc/pom.xml
--
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index ee8d497..1294a61 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -192,11 +192,7 @@
         <artifactSet>
           <excludes>
             <exclude>org.apache.commons:commons-compress</exclude>
-            <exclude>org.apache.hadoop:hadoop-yarn*</exclude>
-            <exclude>org.apache.hadoop:hadoop-mapreduce*</exclude>
-            <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
-            <exclude>org.apache.hadoop:hadoop-client</exclude>
-            <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+            <exclude>org.apache.hadoop:*</exclude>
             <exclude>org.apache.hive:hive-vector-code-gen</exclude>
             <exclude>org.apache.ant:*</exclude>
             <exclude>junit:*</exclude>
@@ -305,13 +301,6 @@
               <shadedPattern>org.apache.hive.com.facebook</shadedPattern>
             </relocation>
             <relocation>
-              <pattern>org.apache.hadoop</pattern>
-              <shadedPattern>org.apache.hive.org.apache.hadoop</shadedPattern>
-              <excludes>
-                <exclude>org.apache.hadoop.security.*</exclude>
-              </excludes>
-            </relocation>
-            <relocation>
               <pattern>org.apache.zookeeper</pattern>
               <shadedPattern>org.apache.hive.org.apache.zookeeper</shadedPattern>
             </relocation>



hive git commit: Preparing for 1.2.3 development

2017-04-07 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 395368fc6 -> 18ddf46e0


Preparing for 1.2.3 development


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/18ddf46e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/18ddf46e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/18ddf46e

Branch: refs/heads/branch-1.2
Commit: 18ddf46e0a8f092358725fc102235cbe6ba3e24d
Parents: 395368f
Author: Vaibhav Gumashta 
Authored: Fri Apr 7 22:13:32 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Apr 7 22:13:32 2017 -0700

--
 accumulo-handler/pom.xml  | 2 +-
 ant/pom.xml   | 2 +-
 beeline/pom.xml   | 2 +-
 cli/pom.xml   | 2 +-
 common/pom.xml| 2 +-
 contrib/pom.xml   | 2 +-
 hbase-handler/pom.xml | 2 +-
 hcatalog/core/pom.xml | 2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml | 2 +-
 hcatalog/pom.xml  | 2 +-
 hcatalog/server-extensions/pom.xml| 2 +-
 hcatalog/streaming/pom.xml| 2 +-
 hcatalog/webhcat/java-client/pom.xml  | 2 +-
 hcatalog/webhcat/svr/pom.xml  | 2 +-
 hwi/pom.xml   | 2 +-
 jdbc/pom.xml  | 2 +-
 metastore/pom.xml | 2 +-
 odbc/pom.xml  | 2 +-
 packaging/pom.xml | 2 +-
 pom.xml   | 2 +-
 ql/pom.xml| 2 +-
 serde/pom.xml | 2 +-
 service/pom.xml   | 2 +-
 shims/0.20S/pom.xml   | 2 +-
 shims/0.23/pom.xml| 2 +-
 shims/aggregator/pom.xml  | 2 +-
 shims/common/pom.xml  | 2 +-
 shims/pom.xml | 2 +-
 shims/scheduler/pom.xml   | 2 +-
 spark-client/pom.xml  | 4 ++--
 testutils/pom.xml | 2 +-
 31 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/accumulo-handler/pom.xml
--
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index f7c23ee..d709e8c 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/ant/pom.xml
--
diff --git a/ant/pom.xml b/ant/pom.xml
index 06738c8..8fce1ea 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index ab71a0c..fb0f503 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/cli/pom.xml
--
diff --git a/cli/pom.xml b/cli/pom.xml
index a45af04..70bcc43 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index 13d3d30..8468672 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/contrib/pom.xml
--
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 1508b68..be2b6c1 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/18ddf46e/hbase-handler/pom.xml
--
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 665ac19..c4c8e4b 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.3-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


svn commit: r1009946 - /websites/production/hive/content/javadocs/r1.2.1/

2017-04-06 Thread vgumashta
Author: vgumashta
Date: Thu Apr  6 23:25:28 2017
New Revision: 1009946

Log:
Removing Apache Hive 1.2.1 API Docs after adding Apache Hive 1.2.2 API Docs

Removed:
websites/production/hive/content/javadocs/r1.2.1/



svn commit: r1009945 - /websites/production/hive/content/javadocs/r1.1.1/

2017-04-06 Thread vgumashta
Author: vgumashta
Date: Thu Apr  6 23:22:21 2017
New Revision: 1009945

Log:
Adding back Apache Hive 1.1.1 API docs that were accidentally removed.

Added:
websites/production/hive/content/javadocs/r1.1.1/
  - copied from r1009941, websites/production/hive/content/javadocs/r1.1.1/



svn commit: r1009940 - in /websites/production/hive/content/javadocs/r1.2.2: ./ api/ api/org/ api/org/apache/ api/org/apache/hadoop/ api/org/apache/hadoop/fs/ api/org/apache/hadoop/fs/class-use/ api/o

2017-04-06 Thread vgumashta
Author: vgumashta
Date: Thu Apr  6 22:41:00 2017
New Revision: 1009940

Log:
Apache Hive 1.2.2 API Docs


[This commit notification would consist of 2980 parts, 
which exceeds the limit of 50 ones, so it was shortened to the summary.]


svn commit: r1009942 - /websites/production/hive/content/javadocs/r1.1.1/

2017-04-06 Thread vgumashta
Author: vgumashta
Date: Thu Apr  6 22:45:58 2017
New Revision: 1009942

Log:
Removing Apache Hive 1.2.1 API Docs after adding Apache Hive 1.2.2 API Docs

Removed:
websites/production/hive/content/javadocs/r1.1.1/



[hive] Git Push Summary

2017-04-06 Thread vgumashta
Repository: hive
Updated Tags:  refs/tags/release-1.2.2-rc0 [deleted] f0c594d90


svn commit: r19015 - in /dev/hive/apache-hive-1.2.2-rc0: ./ apache-hive-1.2.2-bin.tar.gz apache-hive-1.2.2-bin.tar.gz.asc apache-hive-1.2.2-bin.tar.gz.md5 apache-hive-1.2.2-src.tar.gz apache-hive-1.2.

2017-04-02 Thread vgumashta
Author: vgumashta
Date: Mon Apr  3 03:09:39 2017
New Revision: 19015

Log:
Apache Hive 1.2.2 RC0

Added:
dev/hive/apache-hive-1.2.2-rc0/
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz   (with props)
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.asc
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.md5
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz   (with props)
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.asc
dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.md5

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.asc
==
--- dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.asc (added)
+++ dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.asc Mon Apr  3 
03:09:39 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQIcBAABCgAGBQJY4VwTAAoJEPI4TMkIT8wwbiwP/3mQMkVhcUm6O27mCbvEF9w6
+8j4cqvdXO3/fijLI4wpIxLWtstZd4SjDLY1QugGMMfAuL+DhnJdwmfOxIRyX9ldX
+EZwK4ypNWCA0YhycdVLVYXRf9rvx9L678kNZTk6xsOVCLcgrgGmMTrpMG2atwk1k
+UbDc1fkyZ/qGlB81RJhH6HkpOY9WeymJsErCk+qBrpw+kf08roMs3tYSWaYGu2VN
+1eCQEZ1gZ9VIFCH0YYuerGJVH0cqd3FUYJul9TJUBQlJn4ggGdL7+744+K99WHqC
+aW6oZmvS9Pxle4YMT7swurFkLS73kfeuMGMOvl5X3f8kVpwT8OvtD3SHo3MTUnoe
+73MuF2pbkv73kwtDmuYOhnGBWmBZaQE/rRu3ssWhAijdYFhmQHJ7OffKhcPS7o99
+K5+y9MmvIXYS/J6TVlcwGez+sL5KI1YOQE1fsVfyzd/iosApPZXSyTzmYw4sY+BG
+i5hrMD2msZZe+LY18XaLq8aXxVPYhMnUD72KFYIGO7c/Me58B7cKbFRIJmCaO2Rh
+jfpvvhOy524qclDK0N58PiFg3TfW2x/x9o+iVQn+HCdzDRhdbu/d5P5VqWCupXIf
+tBLbocTaPyCxu8fLnyj5i52jhWF4mP1ij1ddJCIDn0QDIGxDcze8NS9dvw9nKm6W
+4DJXkNviB+OY5SStBpcO
+=1Mc2
+-END PGP SIGNATURE-

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.md5
==
--- dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.md5 (added)
+++ dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-bin.tar.gz.md5 Mon Apr  3 
03:09:39 2017
@@ -0,0 +1 @@
+MD5 (apache-hive-1.2.2-bin.tar.gz) = 40170843bce0c41f07f763920a6ab97d

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.asc
==
--- dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.asc (added)
+++ dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.asc Mon Apr  3 
03:09:39 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQIcBAABCgAGBQJY4VwiAAoJEPI4TMkIT8wweVMP/imWSbxuSoTFd1+ChLXl3eHd
+bwyH5cvwInCLkgvzojWDtVeU1PJDXaQNsSu0I+VjwED8Zo3n86xvkEWvEk+4554u
+SDsxZ8knLjdtrruoS8hQI4NEuIDVbrC0I0njw1yjHRC0k3Io86HXDZJS1Gy9aOCF
+BMp22XbLrtqgRdr7l8pVsNqNN54GAEdHKG+PiPOE8MeNxvFUiIJUNEVriOOUPWQ0
+I1JvjO9h5oGSmFEw0OuQBxmIOU+HevDkehoPSZ67k+LpAfZXJqh/9x4AFYI9M16j
+zLYPKNv2EoJrwExFp5zS2gamoJb4OQXIwQqSlV2oTp1ztxkcKLgIxId8VaNwMItu
+xeUr/TiCJrrys24x10PNHZJPG4+fgWmwPJSRGHImIbyzMkOXTokF62E7TlVnD0Ie
+/jgS4e6iyD6lrlpV/VLydn2jgy9F1iNE7dN8hFuXmFBjSt+0KO4TEBw8ZapiShJU
+Q52i9EKeSkEP3ETer7oHAEkm0H+3lD49S5aY87GkkZ7lMl8AKrcfCrCwtkQdLJUB
+1eDVbJT4oJnDvOh4kk/sNEZ1SBhYUrYFG/f9HrIX+PyNIV59LRRQwY/qkTwvgCwk
+HNVi7QU8sHZU3XwDwbN8vGrSpPTRBQUedgwPwjVlaehXqq2zgv7J5X9oD2Yywksn
+d1zk+DmH1t9DE92BtOvn
+=J91v
+-END PGP SIGNATURE-

Added: dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.md5
==
--- dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.md5 (added)
+++ dev/hive/apache-hive-1.2.2-rc0/apache-hive-1.2.2-src.tar.gz.md5 Mon Apr  3 
03:09:39 2017
@@ -0,0 +1 @@
+MD5 (apache-hive-1.2.2-src.tar.gz) = 8770cbbdba67b6021442d2446328e28f




[hive] Git Push Summary

2017-04-01 Thread vgumashta
Repository: hive
Updated Tags:  refs/tags/release-1.2.2-rc0 [created] f0c594d90


hive git commit: Revert "Revert "Preparing for 1.2.2 release""

2017-04-01 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 92b89e3b9 -> 395368fc6


Revert "Revert "Preparing for 1.2.2 release""

This reverts commit 92b89e3b9a90ee7703e4bc9baac09a58087b26ee.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/395368fc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/395368fc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/395368fc

Branch: refs/heads/branch-1.2
Commit: 395368fc6478c7e2a1e84a5a2a8aac45e4399a9e
Parents: 92b89e3
Author: Vaibhav Gumashta 
Authored: Sat Apr 1 11:31:53 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Apr 1 11:31:53 2017 -0700

--
 accumulo-handler/pom.xml  | 2 +-
 ant/pom.xml   | 2 +-
 beeline/pom.xml   | 2 +-
 cli/pom.xml   | 2 +-
 common/pom.xml| 2 +-
 contrib/pom.xml   | 2 +-
 hbase-handler/pom.xml | 2 +-
 hcatalog/core/pom.xml | 2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml | 2 +-
 hcatalog/pom.xml  | 2 +-
 hcatalog/server-extensions/pom.xml| 2 +-
 hcatalog/streaming/pom.xml| 2 +-
 hcatalog/webhcat/java-client/pom.xml  | 2 +-
 hcatalog/webhcat/svr/pom.xml  | 2 +-
 hwi/pom.xml   | 2 +-
 jdbc/pom.xml  | 2 +-
 metastore/pom.xml | 2 +-
 odbc/pom.xml  | 2 +-
 packaging/pom.xml | 2 +-
 pom.xml   | 2 +-
 ql/pom.xml| 2 +-
 serde/pom.xml | 2 +-
 service/pom.xml   | 2 +-
 shims/0.20S/pom.xml   | 2 +-
 shims/0.23/pom.xml| 2 +-
 shims/aggregator/pom.xml  | 2 +-
 shims/common/pom.xml  | 2 +-
 shims/pom.xml | 2 +-
 shims/scheduler/pom.xml   | 2 +-
 spark-client/pom.xml  | 4 ++--
 testutils/pom.xml | 2 +-
 31 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/accumulo-handler/pom.xml
--
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 9ff27bb..f7c23ee 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/ant/pom.xml
--
diff --git a/ant/pom.xml b/ant/pom.xml
index 6daab0f..06738c8 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 7b4197d..ab71a0c 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/cli/pom.xml
--
diff --git a/cli/pom.xml b/cli/pom.xml
index 0b7b4f9..a45af04 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index b954049..13d3d30 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/contrib/pom.xml
--
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 11787de..1508b68 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/395368fc/hbase-handler/pom.xml
--
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 566e2a0..665ac19 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


hive git commit: Revert "Preparing for 1.2.2 release"

2017-04-01 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 d69dcadd6 -> 92b89e3b9


Revert "Preparing for 1.2.2 release"

This reverts commit 915171b4b62ed8072ca81f27dc7b6d6522694ba4.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/92b89e3b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/92b89e3b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/92b89e3b

Branch: refs/heads/branch-1.2
Commit: 92b89e3b9a90ee7703e4bc9baac09a58087b26ee
Parents: d69dcad
Author: Vaibhav Gumashta 
Authored: Sat Apr 1 09:43:29 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Apr 1 09:43:29 2017 -0700

--
 accumulo-handler/pom.xml  | 2 +-
 ant/pom.xml   | 2 +-
 beeline/pom.xml   | 2 +-
 cli/pom.xml   | 2 +-
 common/pom.xml| 2 +-
 contrib/pom.xml   | 2 +-
 hbase-handler/pom.xml | 2 +-
 hcatalog/core/pom.xml | 2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml | 2 +-
 hcatalog/pom.xml  | 2 +-
 hcatalog/server-extensions/pom.xml| 2 +-
 hcatalog/streaming/pom.xml| 2 +-
 hcatalog/webhcat/java-client/pom.xml  | 2 +-
 hcatalog/webhcat/svr/pom.xml  | 2 +-
 hwi/pom.xml   | 2 +-
 jdbc/pom.xml  | 2 +-
 metastore/pom.xml | 2 +-
 odbc/pom.xml  | 2 +-
 packaging/pom.xml | 2 +-
 pom.xml   | 2 +-
 ql/pom.xml| 2 +-
 serde/pom.xml | 2 +-
 service/pom.xml   | 2 +-
 shims/0.20S/pom.xml   | 2 +-
 shims/0.23/pom.xml| 2 +-
 shims/aggregator/pom.xml  | 2 +-
 shims/common/pom.xml  | 2 +-
 shims/pom.xml | 2 +-
 shims/scheduler/pom.xml   | 2 +-
 spark-client/pom.xml  | 4 ++--
 testutils/pom.xml | 2 +-
 31 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/accumulo-handler/pom.xml
--
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index f7c23ee..9ff27bb 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/ant/pom.xml
--
diff --git a/ant/pom.xml b/ant/pom.xml
index 06738c8..6daab0f 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index ab71a0c..7b4197d 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/cli/pom.xml
--
diff --git a/cli/pom.xml b/cli/pom.xml
index a45af04..0b7b4f9 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index 13d3d30..b954049 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/contrib/pom.xml
--
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 1508b68..11787de 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/92b89e3b/hbase-handler/pom.xml
--
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 665ac19..566e2a0 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
    <artifactId>hive</artifactId>
-    <version>1.2.2</version>
+    <version>1.2.1</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


[hive] Git Push Summary

2017-04-01 Thread vgumashta
Repository: hive
Updated Tags:  refs/tags/release-1.2.2-rc0 [deleted] 098c920fe


[hive] Git Push Summary

2017-03-31 Thread vgumashta
Repository: hive
Updated Tags:  refs/tags/release-1.2.2-rc0 [created] 098c920fe


[1/2] hive git commit: 1.2.2 Updating release notes

2017-03-31 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 915171b4b -> d69dcadd6


http://git-wip-us.apache.org/repos/asf/hive/blob/d69dcadd/RELEASE_NOTES.txt
--
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index feff103..f8ce137 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,5573 +1,62 @@
-
-Release Notes - Hive - Version 1.2.1
-
-** Sub-task
-    * [HIVE-10629] - Dropping table in an encrypted zone does not drop warehouse directory
-    * [HIVE-10630] - Renaming tables across encryption zones renames table even though the operation throws error
-    * [HIVE-10658] - Insert with values clause may expose data that should be encrypted
-    * [HIVE-10747] - Enable the cleanup of side effect for the Encryption related qfile test
-    * [HIVE-10910] - Alter table drop partition queries in encrypted zone failing to remove data from HDFS
-
-
-** Bug
-    * [HIVE-4577] - hive CLI can't handle hadoop dfs command  with space and quotes.
-    * [HIVE-8931] - Test TestAccumuloCliDriver is not completing
-    * [HIVE-9069] - Simplify filter predicates for CBO
-    * [HIVE-9828] - Semantic analyzer does not capture view parent entity for tables referred in view with union all
-    * [HIVE-9842] - Enable session/operation timeout by default in HiveServer2
-    * [HIVE-10107] - Union All : Vertex missing stats resulting in OOM and in-efficient plans
-    * [HIVE-10244] - Vectorization : TPC-DS Q80 fails with java.lang.ClassCastException when hive.vectorized.execution.reduce.enabled is enabled
-    * [HIVE-10453] - HS2 leaking open file descriptors when using UDFs
-    * [HIVE-10528] - Hiveserver2 in HTTP mode is not applying auth_to_local rules
-    * [HIVE-10563] - MiniTezCliDriver tests ordering issues
-    * [HIVE-10605] - Make hive version number update automatically in webhcat-default.xml during hive tar generation
-    * [HIVE-10606] - Divide by zero error in HybridHashTableContainer
-    * [HIVE-10627] - Queries fail with Failed to breakup Windowing invocations into Groups
-    * [HIVE-10628] - Incorrect result when vectorized native mapjoin is enabled using null safe operators <=>
-    * [HIVE-10659] - Beeline command which contains semi-colon as a non-command terminator will fail
-    * [HIVE-10664] - Unit tests run fail in windows because of  illegal escape character in file path
-    * [HIVE-10672] - Analyze command on a table using row format serde JsonSerDe fails with NoClassDefFoundError
-    * [HIVE-10674] - jars should not be checked in to the source control repo
-    * [HIVE-10675] - Provide option to skip Accumulo related Hive tests in itests directory
-    * [HIVE-10677] - hive.exec.parallel=true has problem when it is used for analyze table column stats
-    * [HIVE-10678] - update sql standard authorization configuration whitelist - more optimization flags
-    * [HIVE-10679] - JsonSerde ignores varchar and char size limit specified during table creation
-    * [HIVE-10684] - Fix the unit test failures for HIVE-7553 after HIVE-10674 removed the binary jar files
-    * [HIVE-10685] - Alter table concatenate oparetor will cause duplicate data
-    * [HIVE-10686] - java.lang.IndexOutOfBoundsException for query with rank() over(partition ...)
-    * [HIVE-10688] - constant folding is broken for case-when udf
-    * [HIVE-10689] - HS2 metadata api calls should use HiveAuthorizer interface for authorization
-    * [HIVE-10690] - ArrayIndexOutOfBounds exception in MetaStoreDirectSql.aggrColStatsForPartitions()
-    * [HIVE-10696] - TestAddResource tests are non-portable
-    * [HIVE-10704] - Errors in Tez HashTableLoader when estimated table size is 0
-    * [HIVE-10711] - Tez HashTableLoader attempts to allocate more memory than available when HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD exceeds process max mem
-    * [HIVE-10719] - Hive metastore failure when alter table rename is attempted.
-    * [HIVE-10724] - WebHCat e2e test TestStreaming_5 fails on Windows
-    * [HIVE-10727] - Import throws error message "org.apache.thrift.protocol.TProtocolException: Required field 'filesAdded' is unset!"
-    * [HIVE-10735] - Cached plan race condition - VectorMapJoinCommonOperator has no closeOp()
-    * [HIVE-10736] - HiveServer2 shutdown of cached tez app-masters is not clean
-    * [HIVE-10741] - count distinct rewrite is not firing
-    * [HIVE-10745] - Better null handling by Vectorizer
-    * [HIVE-10746] -  Hive 1.2.0+Tez produces 1-byte FileSplits from mapred.TextInputFormat
-    * [HIVE-10753] - hs2 jdbc url - wrong connection string cause  error on beeline/jdbc/odbc client, misleading message
-    * [HIVE-10760] - Templeton: HCatalog Get Column for Non-existent column returns Server Error (500) rather than Not Found(404)
-    * [HIVE-10768] - In QTestGenTask.execute() we should not throw an exception right away if we are unable to clean any old files
-    * [HIVE-10776] - Schema on 

[2/2] hive git commit: 1.2.2 Updating release notes

2017-03-31 Thread vgumashta
1.2.2 Updating release notes


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d69dcadd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d69dcadd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d69dcadd

Branch: refs/heads/branch-1.2
Commit: d69dcadd6b31008fa7230e06b9d7ca476665e85e
Parents: 915171b
Author: Vaibhav Gumashta 
Authored: Fri Mar 31 15:59:30 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 31 15:59:30 2017 -0700

--
 README.txt|4 +-
 RELEASE_NOTES.txt | 5635 +---
 2 files changed, 64 insertions(+), 5575 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d69dcadd/README.txt
--
diff --git a/README.txt b/README.txt
index c4d692b..8b486ca 100644
--- a/README.txt
+++ b/README.txt
@@ -1,4 +1,4 @@
-Apache Hive (TM) 1.2.1
+Apache Hive (TM) 1.2.2
 ==
 
 The Apache Hive (TM) data warehouse software facilitates querying and
@@ -87,7 +87,7 @@ Requirements
 Upgrading from older versions of Hive
 =
 
-- Hive 1.2.1 includes changes to the MetaStore schema. If you are
+- Hive 1.2.2 does not include changes to the MetaStore schema. If you are
   upgrading from an earlier version of Hive prior to Hive 1.2.0, it
   is imperative that you upgrade the MetaStore schema by running
   the appropriate schema upgrade scripts located in the



hive git commit: Preparing for 1.2.2 release

2017-03-31 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 6ebefdea6 -> 915171b4b


Preparing for 1.2.2 release


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/915171b4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/915171b4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/915171b4

Branch: refs/heads/branch-1.2
Commit: 915171b4b62ed8072ca81f27dc7b6d6522694ba4
Parents: 6ebefde
Author: Vaibhav Gumashta 
Authored: Fri Mar 31 15:46:53 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 31 15:46:53 2017 -0700

--
 accumulo-handler/pom.xml  | 2 +-
 ant/pom.xml   | 2 +-
 beeline/pom.xml   | 2 +-
 cli/pom.xml   | 2 +-
 common/pom.xml| 2 +-
 contrib/pom.xml   | 2 +-
 hbase-handler/pom.xml | 2 +-
 hcatalog/core/pom.xml | 2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml | 2 +-
 hcatalog/pom.xml  | 2 +-
 hcatalog/server-extensions/pom.xml| 2 +-
 hcatalog/streaming/pom.xml| 2 +-
 hcatalog/webhcat/java-client/pom.xml  | 2 +-
 hcatalog/webhcat/svr/pom.xml  | 2 +-
 hwi/pom.xml   | 2 +-
 jdbc/pom.xml  | 2 +-
 metastore/pom.xml | 2 +-
 odbc/pom.xml  | 2 +-
 packaging/pom.xml | 2 +-
 pom.xml   | 2 +-
 ql/pom.xml| 2 +-
 serde/pom.xml | 2 +-
 service/pom.xml   | 2 +-
 shims/0.20S/pom.xml   | 2 +-
 shims/0.23/pom.xml| 2 +-
 shims/aggregator/pom.xml  | 2 +-
 shims/common/pom.xml  | 2 +-
 shims/pom.xml | 2 +-
 shims/scheduler/pom.xml   | 2 +-
 spark-client/pom.xml  | 4 ++--
 testutils/pom.xml | 2 +-
 31 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/accumulo-handler/pom.xml
--
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 9ff27bb..f7c23ee 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/ant/pom.xml
--
diff --git a/ant/pom.xml b/ant/pom.xml
index 6daab0f..06738c8 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 7b4197d..ab71a0c 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/cli/pom.xml
--
diff --git a/cli/pom.xml b/cli/pom.xml
index 0b7b4f9..a45af04 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index b954049..13d3d30 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/contrib/pom.xml
--
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 11787de..1508b68 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/hbase-handler/pom.xml
--
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 566e2a0..665ac19 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>1.2.1</version>
+    <version>1.2.2</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/915171b4/hcatalog/core/pom.xml

[2/3] hive git commit: Revert "HIVE-12437: SMB join in tez fails when one of the tables is empty (Vikram Dixit K, reviewed by Siddharth Seth)"

2017-03-31 Thread vgumashta
Revert "HIVE-12437: SMB join in tez fails when one of the tables is empty 
(Vikram Dixit K, reviewed by Siddharth Seth)"

This reverts commit 63f0471983f68a6254f4302796c9797996ff43e3.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/abc8af02
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/abc8af02
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/abc8af02

Branch: refs/heads/branch-1.2
Commit: abc8af027edbef062989756ec5a0d0b404ba43ce
Parents: 23bcf7d
Author: Vaibhav Gumashta 
Authored: Fri Mar 31 13:49:27 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 31 13:49:27 2017 -0700

--
 .../hive/ql/exec/tez/CustomPartitionVertex.java |   9 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  14 +-
 .../test/queries/clientpositive/tez_smb_empty.q |  13 --
 .../clientpositive/tez/tez_smb_empty.q.out  | 168 ---
 4 files changed, 4 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/abc8af02/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
index 4de692c..cbea27c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
@@ -319,9 +319,9 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
       Multimap<Integer, ByteBuffer> bucketToGroupedSplitMap) throws IOException {
     // the bucket to task map should have been setup by the big table.
     LOG.info("Processing events for input " + inputName);
-    if (inputNameInputSpecMap.get(mainWorkName) == null) {
-      LOG.info("We don't have a routing table yet. Will need to wait for the main input "
-          + mainWorkName + " initialization");
+    if (bucketToTaskMap.isEmpty()) {
+      LOG.info("We don't have a routing table yet. Will need to wait for the main input"
+          + " initialization");
       inputToGroupedSplitMap.put(inputName, bucketToGroupedSplitMap);
       return;
     }
@@ -351,9 +351,6 @@ public class CustomPartitionVertex extends VertexManagerPlugin {
 
     for (Entry<Integer, Collection<ByteBuffer>> entry : bucketToSerializedSplitMap.asMap().entrySet()) {
       Collection<Integer> destTasks = bucketToTaskMap.get(entry.getKey());
-      if ((destTasks == null) || (destTasks.isEmpty())) {
-        continue;
-      }
       for (Integer task : destTasks) {
         int count = 0;
         for (ByteBuffer buf : entry.getValue()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/abc8af02/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index c6afad6..dd9b8d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -172,19 +172,7 @@ public class MapRecordProcessor extends RecordProcessor {
         jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName());
         mergeMapOp.initialize(jconf, null);
         // if there are no files/partitions to read, we need to skip trying to read
-        MultiMRInput multiMRInput = multiMRInputMap.get(mergeMapWork.getName());
-        boolean skipRead = false;
-        if (multiMRInput == null) {
-          l4j.info("Multi MR Input for work " + mergeMapWork.getName() + " is null. Skipping read.");
-          skipRead = true;
-        } else {
-          Collection<KeyValueReader> keyValueReaders = multiMRInput.getKeyValueReaders();
-          if ((keyValueReaders == null) || (keyValueReaders.isEmpty())) {
-            l4j.info("Key value readers are null or empty and hence skipping read. "
-                + "KeyValueReaders = " + keyValueReaders);
-            skipRead = true;
-          }
-        }
+        boolean skipRead = mergeMapOp.getConf().getPathToAliases().isEmpty();
         if (skipRead) {
           List<Operator<? extends OperatorDesc>> children = new ArrayList<Operator<? extends OperatorDesc>>();
           children.addAll(mergeMapOp.getConf().getAliasToWork().values());

http://git-wip-us.apache.org/repos/asf/hive/blob/abc8af02/ql/src/test/queries/clientpositive/tez_smb_empty.q
--
diff --git a/ql/src/test/queries/clientpositive/tez_smb_empty.q 

[3/3] hive git commit: Revert "HIVE-11356: SMB join on tez fails when one of the tables is empty (Vikram Dixit K, reviewed by Gunther Hagleitner)"

2017-03-31 Thread vgumashta
Revert "HIVE-11356: SMB join on tez fails when one of the tables is empty 
(Vikram Dixit K, reviewed by Gunther Hagleitner)"

This reverts commit 05cfaa05de8cf8d30cc8afcad33c872040eeb7ef.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6ebefdea
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6ebefdea
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6ebefdea

Branch: refs/heads/branch-1.2
Commit: 6ebefdea6ba4c09f36d5c4d3372dbe272bce20e1
Parents: abc8af0
Author: Vaibhav Gumashta 
Authored: Fri Mar 31 13:49:36 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 31 13:49:36 2017 -0700

--
 .../test/resources/testconfiguration.properties |   1 -
 pom.xml |   2 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |   8 +-
 .../apache/hadoop/hive/ql/exec/MapOperator.java |  27 -
 .../hive/ql/exec/TezDummyStoreOperator.java |   9 -
 .../hive/ql/exec/tez/CustomPartitionVertex.java |   1 -
 .../hive/ql/exec/tez/MapRecordProcessor.java|  26 +-
 .../test/queries/clientpositive/tez_smb_empty.q |  55 --
 .../clientpositive/tez/tez_smb_empty.q.out  | 676 ---
 9 files changed, 5 insertions(+), 800 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6ebefdea/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 0b44c97..5f0e7ff 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -344,7 +344,6 @@ minitez.query.files=bucket_map_join_tez1.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\
   tez_smb_1.q,\
-  tez_smb_empty.q,\
   vectorized_dynamic_partition_pruning.q,\
   tez_multi_union.q,\
   tez_join.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/6ebefdea/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 06d0b77..e8811ac 100644
--- a/pom.xml
+++ b/pom.xml
@@ -156,7 +156,7 @@
 1.0.1
 1.7.5
 4.0.4
-0.5.4
+0.5.2
 2.2.0
 1.3.1
 2.10

http://git-wip-us.apache.org/repos/asf/hive/blob/6ebefdea/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 296a92d..24af765 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -143,13 +143,7 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator

http://git-wip-us.apache.org/repos/asf/hive/blob/6ebefdea/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index f8717ae..d5ea96a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.exec.MapOperator.MapOpCtx;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -56,7 +55,6 @@ import 
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import 
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -332,31 +330,6 @@ public class MapOperator extends Operator<MapWork> implements Serializable, Cloneable {
 return tableDescOI;
   }
 
-  /*
-   * This is the same as the setChildren method below but for empty tables.
-   * It takes care of the following:
-   * 1. Create the 

[1/3] hive git commit: Revert "HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be cast to org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, reviewed by Sergey Sh

2017-03-31 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 a3f718f7f -> 6ebefdea6


Revert "HIVE-12947: SMB mapjoin query runtime error "FileSinkOperator cannot be 
cast to org.apache.hadoop.hive.ql.exec.DummyStoreOperator" (Vikram Dixit K, 
reviewed by Sergey Shelukhin)"

This reverts commit 0b9b1d84f057e7fa76888ef8a9f94e753a0291df.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/23bcf7d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/23bcf7d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/23bcf7d2

Branch: refs/heads/branch-1.2
Commit: 23bcf7d2fd53b5fb024c581c486ea9297b465f4a
Parents: a3f718f
Author: Vaibhav Gumashta 
Authored: Fri Mar 31 13:48:53 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 31 13:48:53 2017 -0700

--
 .../test/resources/testconfiguration.properties |   1 -
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  19 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java|  25 +-
 ql/src/test/queries/clientpositive/smb_cache.q  | 120 --
 .../results/clientpositive/tez/smb_cache.q.out  | 413 ---
 5 files changed, 19 insertions(+), 559 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/23bcf7d2/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index d2f9b58..0b44c97 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -317,7 +317,6 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
 
 
 minitez.query.files=bucket_map_join_tez1.q,\
-  smb_cache.q,\
   bucket_map_join_tez2.q,\
   dynamic_partition_pruning.q,\
   dynamic_partition_pruning_2.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/23bcf7d2/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index d69d82e..296a92d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -34,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
 import org.apache.hadoop.hive.ql.exec.tez.RecordSource;
+import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
 import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
@@ -631,16 +633,13 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator
-      Map<Integer, DummyStoreOperator> dummyOps =
-          ((TezContext) (MapredContext.get())).getDummyOpsMap();
-      for (Entry<Integer, DummyStoreOperator> connectOp : dummyOps.entrySet()) {
-        if (connectOp.getValue().getChildOperators() == null
-            || connectOp.getValue().getChildOperators().isEmpty()) {
-          parentOperators.add(connectOp.getKey(), connectOp.getValue());
-          connectOp.getValue().getChildOperators().add(this);
-        }
+    Map<Integer, DummyStoreOperator> dummyOps =
+        ((TezContext) (MapredContext.get())).getDummyOpsMap();
+    for (Entry<Integer, DummyStoreOperator> connectOp : dummyOps.entrySet()) {
+      if (connectOp.getValue().getChildOperators() == null
+          || connectOp.getValue().getChildOperators().isEmpty()) {
+        parentOperators.add(connectOp.getKey(), connectOp.getValue());
+        connectOp.getValue().getChildOperators().add(this);
+      }
     }
     super.initializeLocalWork(hconf);
http://git-wip-us.apache.org/repos/asf/hive/blob/23bcf7d2/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index 8b69e3c..c6afad6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -154,7 +154,6 @@ public class MapRecordProcessor extends RecordProcessor {
   

hive git commit: HIVE-16186: REPL DUMP shows last event ID of the database even if we use LIMIT option. (Sankar Hariappan reviewed by Vaibhav Gumashta)

2017-03-28 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master a1cbccb8d -> 5d8330290


HIVE-16186: REPL DUMP shows last event ID of the database even if we use LIMIT 
option. (Sankar Hariappan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5d833029
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5d833029
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5d833029

Branch: refs/heads/master
Commit: 5d83302907887ace2bf35d1d4487f030c93d09a3
Parents: a1cbccb
Author: Vaibhav Gumashta 
Authored: Tue Mar 28 12:12:15 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Mar 28 12:13:45 2017 -0700

--
 .../hive/ql/TestReplicationScenarios.java   | 65 
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 14 +++--
 2 files changed, 73 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5d833029/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index c9092b1..9e79b6a 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -1013,6 +1013,71 @@ public class TestReplicationScenarios {
   }
 
   @Test
+  public void testDumpLimit() throws IOException {
+    String testName = "dumpLimit";
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
+    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0, 0);
+    String replDumpId = getResult(0, 1, true);
+    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
+
+    String[] unptn_data = new String[] { "eleven", "thirteen", "twelve" };
+    String[] unptn_data_load1 = new String[] { "eleven" };
+    String[] unptn_data_load2 = new String[] { "eleven", "thirteen" };
+
+    // 3 events to insert, last repl ID: replDumpId+3
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
+    // 3 events to insert, last repl ID: replDumpId+6
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
+    // 3 events to insert, last repl ID: replDumpId+9
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[2] + "')");
+    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3");
+    String incrementalDumpLocn = getResult(0, 0);
+    String incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1);
+
+    advanceDumpDir();
+    Integer lastReplID = Integer.valueOf(replDumpId);
+    lastReplID += 1000;
+    String toReplID = String.valueOf(lastReplID);
+
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3");
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2);
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
+  }
+
+  @Test
   public void testStatus() throws IOException {
     // first test ReplStateMap functionality
 

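To make the flow of testDumpLimit easier to follow: a bootstrap REPL DUMP returns a dump location and a last event id, and each incremental REPL DUMP ... FROM <id> [TO <id>] [LIMIT n] should return an id that covers only the events it actually dumped, which is what this fix enforces. A minimal client-side sketch of that loop follows; the JDBC URL, database name, and the two-column (location, id) result layout are assumptions inferred from the test's run()/getResult() helpers, not an official client API.

// Sketch: driving bootstrap + bounded incremental REPL dumps over JDBC.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ReplDumpLimitSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://source-hs2:10000/default");
         Statement stmt = conn.createStatement()) {
      // Bootstrap dump: one row of (dump location, last replicated event id).
      stmt.execute("REPL DUMP repldb");
      String lastId;
      try (ResultSet rs = stmt.getResultSet()) {
        rs.next();
        System.out.println("bootstrap dumped to " + rs.getString(1));
        lastId = rs.getString(2);
      }
      // Incremental dump capped at 3 events; the returned id must reflect only
      // the events actually dumped, which is the bug HIVE-16186 fixes.
      stmt.execute("REPL DUMP repldb FROM " + lastId + " LIMIT 3");
      try (ResultSet rs = stmt.getResultSet()) {
        rs.next();
        System.out.println("incremental dumped to " + rs.getString(1) + ", new id " + rs.getString(2));
      }
    }
  }
}
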
hive git commit: HIVE-16219: metastore notification_log contains serialized message with non functional fields (Anishek Agarwal reviewed by Vaibhav Gumashta)

2017-03-27 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 4d9810afb -> 84f4e3a3b


HIVE-16219: metastore notification_log contains serialized message with non 
functional fields (Anishek Agarwal reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/84f4e3a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/84f4e3a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/84f4e3a3

Branch: refs/heads/master
Commit: 84f4e3a3bf8ccc924fc6c260a47a30b6e4f1adcc
Parents: 4d9810a
Author: Vaibhav Gumashta 
Authored: Mon Mar 27 15:54:41 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Mar 27 15:57:20 2017 -0700

--
 metastore/pom.xml   |  6 ++
 .../metastore/messaging/PartitionFiles.java |  3 +
 .../messaging/json/JSONMessageDeserializer.java |  4 +
 .../json/JSONMessageDeserializerTest.java   | 89 
 pom.xml |  6 ++
 5 files changed, 108 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/84f4e3a3/metastore/pom.xml
--
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 35752ff..ef908ca 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -255,6 +255,12 @@
       <version>${disruptor.version}</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.skyscreamer</groupId>
+      <artifactId>jsonassert</artifactId>
+      <version>1.4.0</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   

http://git-wip-us.apache.org/repos/asf/hive/blob/84f4e3a3/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
index b10b8a8..4fd7f8c 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/PartitionFiles.java
@@ -22,10 +22,13 @@ import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.collect.Lists;
+import org.codehaus.jackson.annotate.JsonProperty;
 
 public class PartitionFiles {
 
+  @JsonProperty
   private String partitionName;
+  @JsonProperty
   private List<String> files;
 
   public PartitionFiles(String partitionName, Iterator<String> files) {

http://git-wip-us.apache.org/repos/asf/hive/blob/84f4e3a3/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
index 41732c7..40ef5fb 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
 import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.codehaus.jackson.map.DeserializationConfig;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
 
 /**
  * MessageDeserializer implementation, for deserializing from JSON strings.
@@ -46,6 +47,9 @@ public class JSONMessageDeserializer extends MessageDeserializer {
 
   static {
     mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false);
+    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_IS_GETTERS, false);
+    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/84f4e3a3/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
--
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
new file mode 100644
index 000..b7c6304
--- /dev/null
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializerTest.java
@@ -0,0 +1,89 @@
+package org.apache.hadoop.hive.metastore.messaging.json;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.json.JSONException;
+import org.junit.Test;
+import org.skyscreamer.jsonassert.JSONAssert;

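The archive truncates the new JSONMessageDeserializerTest here. The substance of the fix is already visible above: with the three AUTO_DETECT features switched off, Jackson serializes only members explicitly annotated with @JsonProperty, so internal scratch state no longer leaks into the serialized notification messages. A minimal, self-contained sketch of that behavior (the Msg class and its fields are invented for illustration, not taken from the patch):

// Sketch: with auto-detection off, only @JsonProperty members are serialized.
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;

public class AutoDetectSketch {
  static class Msg {
    @JsonProperty
    private String name = "t1";          // included: explicitly annotated
    private String scratchState = "xyz"; // skipped: no annotation, no field auto-detection

    public String getScratchState() { return scratchState; } // getter is ignored too
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false);
    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_IS_GETTERS, false);
    mapper.configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false);
    System.out.println(mapper.writeValueAsString(new Msg())); // prints {"name":"t1"}
  }
}
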
hive git commit: HIVE-16107: JDBC: HttpClient should retry one more time on NoHttpResponseException (Vaibhav Gumashta reviewed by Daniel Dai, Thejas Nair)

2017-03-22 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master ce695b5d4 -> 8613ef200


HIVE-16107: JDBC: HttpClient should retry one more time on 
NoHttpResponseException (Vaibhav Gumashta reviewed by Daniel Dai, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8613ef20
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8613ef20
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8613ef20

Branch: refs/heads/master
Commit: 8613ef200fb1e1372f41a225bd358f06e754f906
Parents: ce695b5
Author: Vaibhav Gumashta 
Authored: Wed Mar 22 11:02:23 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Wed Mar 22 11:02:23 2017 -0700

--
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   | 32 
 .../org/apache/hive/jdbc/HiveConnection.java| 24 +--
 2 files changed, 53 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8613ef20/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index afe23f8..3780b4e 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -966,6 +966,38 @@ public class TestJdbcWithMiniHS2 {
   }
 
   /**
+   * Test for JDBC driver retry on NoHttpResponseException.
+   * @throws Exception
+   */
+  @Test
+  public void testHttpRetryOnServerIdleTimeout() throws Exception {
+    // Stop HiveServer2
+    stopMiniHS2();
+    HiveConf conf = new HiveConf();
+    conf.set("hive.server2.transport.mode", "http");
+    // Set the server's idle timeout to a very low value
+    conf.set("hive.server2.thrift.http.max.idle.time", "5");
+    startMiniHS2(conf);
+    String userName = System.getProperty("user.name");
+    Connection conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
+    Statement stmt = conn.createStatement();
+    stmt.execute("select from_unixtime(unix_timestamp())");
+    // Sleep for longer than the server's idle timeout and execute a query
+    TimeUnit.SECONDS.sleep(10);
+    try {
+      stmt.execute("select from_unixtime(unix_timestamp())");
+    } catch (Exception e) {
+      fail("Not expecting exception: " + e);
+    } finally {
+      if (conn != null) {
+        conn.close();
+      }
+    }
+    // Restore original state
+    restoreMiniHS2AndConnections();
+  }
+
+  /**
   * Tests that DataNucleus' NucleusContext.classLoaderResolverMap clears cached class objects
   * (& hence doesn't leak classloaders) on closing any session
   *

http://git-wip-us.apache.org/repos/asf/hive/blob/8613ef20/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 1695c5d..fb18adb 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -41,6 +41,7 @@ import org.apache.hive.service.rpc.thrift.TSessionHandle;
 import org.apache.http.HttpRequestInterceptor;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.CookieStore;
+import org.apache.http.client.HttpRequestRetryHandler;
 import org.apache.http.client.ServiceUnavailableRetryStrategy;
 import org.apache.http.config.Registry;
 import org.apache.http.config.RegistryBuilder;
@@ -386,9 +387,9 @@ public class HiveConnection implements java.sql.Connection {
* Add an interceptor to pass username/password in the header.
* In https mode, the entire information is encrypted
*/
-      requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword(),
-          cookieStore, cookieName, useSsl,
-          additionalHttpHeaders);
+      requestInterceptor =
+          new HttpBasicAuthInterceptor(getUserName(), getPassword(), cookieStore, cookieName,
+              useSsl, additionalHttpHeaders);
   }
 }
 // Configure http client for cookie based authentication
@@ -421,6 +422,23 @@ public class HiveConnection implements java.sql.Connection {
     } else {
       httpClientBuilder = HttpClientBuilder.create();
     }
+    // In case the server's idle timeout is set to a lower value, it might close its side of
+    // the connection. However, we retry one more time on NoHttpResponseException
+

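The archived diff cuts off just before the retry handler itself. Judging from the newly imported HttpRequestRetryHandler and the comment above, a handler of roughly the following shape does the job; this is a sketch of the technique under those assumptions, not the committed code:

// Sketch: retry a request exactly once when the server silently dropped the connection.
import java.io.IOException;
import org.apache.http.NoHttpResponseException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.protocol.HttpContext;

public class RetrySketch {
  public static HttpClientBuilder withOneRetry(HttpClientBuilder builder) {
    return builder.setRetryHandler(new HttpRequestRetryHandler() {
      @Override
      public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
        // Retry only the first failure, and only for a missing HTTP response.
        return executionCount <= 1 && exception instanceof NoHttpResponseException;
      }
    });
  }
}
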
hive git commit: HIVE-15766: DBNotificationlistener leaks JDOPersistenceManager (Vaibhav Gumashta reviewed by Thejas Nair, Mohit Sabharwal, Daniel Dai)

2017-03-20 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 7ea85a0a4 -> 9a47cf9f9


HIVE-15766: DBNotificationlistener leaks JDOPersistenceManager (Vaibhav 
Gumashta reviewed by Thejas Nair, Mohit Sabharwal, Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9a47cf9f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9a47cf9f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9a47cf9f

Branch: refs/heads/master
Commit: 9a47cf9f92d1c8a4e72890e3dfe2d9567f12bfb5
Parents: 7ea85a0
Author: Vaibhav Gumashta 
Authored: Mon Mar 20 16:25:05 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Mar 20 16:25:05 2017 -0700

--
 .../listener/DbNotificationListener.java| 35 
 .../hadoop/hive/metastore/HiveMetaStore.java| 28 
 2 files changed, 28 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/9a47cf9f/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
--
diff --git 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index f7e3e3a..ea6cb79 100644
--- 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.RawStore;
 import org.apache.hadoop.hive.metastore.RawStoreProxy;
@@ -86,23 +87,17 @@ public class DbNotificationListener extends MetaStoreEventListener {
   // HiveConf rather than a Configuration.
   private HiveConf hiveConf;
   private MessageFactory msgFactory;
-  private RawStore rs;
-
-  private synchronized void init(HiveConf conf) {
-    try {
-      rs = RawStoreProxy.getProxy(conf, conf,
-          conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 99);
-    } catch (MetaException e) {
-      LOG.error("Unable to connect to raw store, notifications will not be tracked", e);
-      rs = null;
-    }
-    if (cleaner == null && rs != null) {
-      cleaner = new CleanerThread(conf, rs);
+
+  private synchronized void init(HiveConf conf) throws MetaException {
+    if (cleaner == null) {
+      cleaner =
+          new CleanerThread(conf, RawStoreProxy.getProxy(conf, conf,
+              conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 99));
       cleaner.start();
     }
   }
 
-  public DbNotificationListener(Configuration config) {
+  public DbNotificationListener(Configuration config) throws MetaException {
     super(config);
     // The code in MetastoreUtils.getMetaStoreListeners() that calls this looks for a constructor
     // with a Configuration parameter, so we have to declare config as Configuration.  But it
@@ -473,16 +468,12 @@ public class DbNotificationListener extends MetaStoreEventListener {
   }
 
   // Process this notification by adding it to metastore DB
-  private void process(NotificationEvent event) {
+  private void process(NotificationEvent event) throws MetaException {
     event.setMessageFormat(msgFactory.getMessageFormat());
-    if (rs != null) {
-      synchronized (NOTIFICATION_TBL_LOCK) {
-        LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(),
-            event.getMessage());
-        rs.addNotificationEvent(event);
-      }
-    } else {
-      LOG.warn("Dropping event " + event + " since notification is not running.");
+    synchronized (NOTIFICATION_TBL_LOCK) {
+      LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(),
+          event.getMessage());
+      HMSHandler.getMSForConf(hiveConf).addNotificationEvent(event);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9a47cf9f/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 07eca38..80b1e98 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ 

hive git commit: HIVE-15126: Branch-1.2: Fix TestCliDriver.join_merge_multi_expressions.q (Vaibhav Gumashta reviewed by Daniel Dai)

2017-03-19 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 8bc7daeb0 -> 07c86120e


HIVE-15126: Branch-1.2: Fix TestCliDriver.join_merge_multi_expressions.q 
(Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/07c86120
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/07c86120
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/07c86120

Branch: refs/heads/branch-1.2
Commit: 07c86120e249153475b68f4ca1b40ec62b3ac1a2
Parents: 8bc7dae
Author: Vaibhav Gumashta 
Authored: Sun Mar 19 13:46:59 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sun Mar 19 13:46:59 2017 -0700

--
 .../join_merge_multi_expressions.q.out  | 46 
 1 file changed, 29 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/07c86120/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out 
b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
index a8bd4df..b73643e 100644
--- a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
+++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
@@ -21,42 +21,54 @@ STAGE PLANS:
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: b
+alias: a
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: c
+alias: a
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Operator Tree:
 

hive git commit: Revert "HIVE-15126: Branch-1.2: Fix TestCliDriver.join_merge_multi_expressions.q (Vaibhav Gumashta reviewed by Daniel Dai)"

2017-03-19 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 2efcf9a31 -> 8bc7daeb0


Revert "HIVE-15126: Branch-1.2: Fix 
TestCliDriver.join_merge_multi_expressions.q (Vaibhav Gumashta reviewed by 
Daniel Dai)"

This reverts commit 2efcf9a31fc3214a04745ae2352dfa17ae2dc0c5.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8bc7daeb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8bc7daeb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8bc7daeb

Branch: refs/heads/branch-1.2
Commit: 8bc7daeb04ee3b5d1453bccd2ed6c43706eed854
Parents: 2efcf9a
Author: Vaibhav Gumashta 
Authored: Sat Mar 18 23:43:35 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Mar 18 23:43:35 2017 -0700

--
 .../join_merge_multi_expressions.q.out  | 46 
 1 file changed, 17 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8bc7daeb/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out 
b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
index b73643e..a8bd4df 100644
--- a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
+++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
@@ -21,54 +21,42 @@ STAGE PLANS:
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: key (type: string), hr (type: string)
-outputColumnNames: _col0, _col1
+  Reduce Output Operator
+key expressions: key (type: string), hr (type: string)
+sort order: ++
+Map-reduce partition columns: key (type: string), hr (type: 
string)
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: a
+alias: b
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: key (type: string), hr (type: string)
-outputColumnNames: _col0, _col1
+  Reduce Output Operator
+key expressions: key (type: string), hr (type: string)
+sort order: ++
+Map-reduce partition columns: key (type: string), hr (type: 
string)
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: a
+alias: c
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: key (type: string), hr (type: string)
-outputColumnNames: _col0, _col1
+  Reduce Output Operator
+key expressions: key (type: string), hr (type: string)
+sort order: ++
+Map-reduce partition columns: key (type: string), hr (type: 
string)
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
-  Statistics: Num rows: 1000 Data size: 

hive git commit: HIVE-15126: Branch-1.2: Fix TestCliDriver.join_merge_multi_expressions.q (Vaibhav Gumashta reviewed by Daniel Dai)

2017-03-19 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 d3b88022a -> 2efcf9a31


HIVE-15126: Branch-1.2: Fix TestCliDriver.join_merge_multi_expressions.q 
(Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2efcf9a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2efcf9a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2efcf9a3

Branch: refs/heads/branch-1.2
Commit: 2efcf9a31fc3214a04745ae2352dfa17ae2dc0c5
Parents: d3b8802
Author: Vaibhav Gumashta 
Authored: Sat Mar 18 23:41:27 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Sat Mar 18 23:41:27 2017 -0700

--
 .../join_merge_multi_expressions.q.out  | 46 
 1 file changed, 29 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2efcf9a3/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out 
b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
index a8bd4df..b73643e 100644
--- a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
+++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
@@ -21,42 +21,54 @@ STAGE PLANS:
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: b
+alias: a
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   TableScan
-alias: c
+alias: a
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
   predicate: key is not null (type: boolean)
   Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), hr (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), hr (type: 
string)
+  Select Operator
+expressions: key (type: string), hr (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Operator Tree:
 

hive git commit: HIVE-12469: Bump Commons-Collections dependency from 3.2.1 to 3.2.2. to address vulnerability (Ashutosh Chauhan reviewed by Sergio Peña, Reuben Kuhnert)

2017-03-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 229c512e6 -> d3b88022a


HIVE-12469: Bump Commons-Collections dependency from 3.2.1 to 3.2.2. to address 
vulnerability (Ashutosh Chauhan reviewed by Sergio Peña, Reuben Kuhnert)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d3b88022
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d3b88022
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d3b88022

Branch: refs/heads/branch-1.2
Commit: d3b88022a03eead63bc6e4b67a6f8a8864d757e6
Parents: 229c512
Author: Vaibhav Gumashta 
Authored: Fri Mar 17 13:43:04 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 17 13:43:04 2017 -0700

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d3b88022/pom.xml
--
diff --git a/pom.xml b/pom.xml
index beacfc8..06d0b77 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,6 +107,7 @@
 3.2.9
 1.2
 1.4
+3.2.2
 1.4.1
 1.1
 3.0.1
@@ -300,6 +301,11 @@
         <version>${commons-codec.version}</version>
       </dependency>
       <dependency>
+        <groupId>commons-collections</groupId>
+        <artifactId>commons-collections</artifactId>
+        <version>${commons-collections.version}</version>
+      </dependency>
+      <dependency>
         <groupId>commons-httpclient</groupId>
         <artifactId>commons-httpclient</artifactId>
         <version>${commons-httpclient.version}</version>



hive git commit: HIVE-13017: Child process of HiveServer2 fails to get delegation token from non default FileSystem (Sushanth Sowmyan reviewed by Thejas Nair)

2017-03-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 66fd257c6 -> 229c512e6


HIVE-13017: Child process of HiveServer2 fails to get delegation token from non 
default FileSystem (Sushanth Sowmyan reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/229c512e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/229c512e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/229c512e

Branch: refs/heads/branch-1.2
Commit: 229c512e625ff2e496564aed0f83b431df3c0b33
Parents: 66fd257
Author: Vaibhav Gumashta 
Authored: Fri Mar 17 13:20:37 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 17 13:20:37 2017 -0700

--
 .../hadoop/hive/ql/exec/SecureCmdDoAs.java  | 20 +++-
 1 file changed, 19 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/229c512e/ql/src/java/org/apache/hadoop/hive/ql/exec/SecureCmdDoAs.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SecureCmdDoAs.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/SecureCmdDoAs.java
index 974c74e..e1228cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SecureCmdDoAs.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SecureCmdDoAs.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +30,8 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * SecureCmdDoAs - Helper class for setting parameters and env necessary for
@@ -36,6 +40,9 @@ import org.apache.hadoop.security.UserGroupInformation;
  *
  */
 public class SecureCmdDoAs {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SecureCmdDoAs.class);
+
   private final Path tokenPath;
   private final File tokenFile;
 
@@ -45,7 +52,18 @@ public class SecureCmdDoAs {
 String uname = UserGroupInformation.getLoginUser().getShortUserName();
 FileSystem fs = FileSystem.get(conf);
 Credentials cred = new Credentials();
-ShimLoader.getHadoopShims().addDelegationTokens(fs, cred, uname);
+
+    ShimLoader.getHadoopShims().addDelegationTokens(fs, cred, uname); // ask default fs first
+    for (String uri : conf.getStringCollection("mapreduce.job.hdfs-servers")) {
+      try {
+        ShimLoader.getHadoopShims().addDelegationTokens(
+            FileSystem.get(new URI(uri), conf),
+            cred, uname);
+      } catch (URISyntaxException e) {
+        LOG.warn("Invalid URI in mapreduce.job.hdfs-servers:[" + uri + "], ignoring.", e);
+      }
+    }
+
 tokenFile = File.createTempFile("hive_hadoop_delegation_token", null);
 tokenPath = new Path(tokenFile.toURI());
 



hive git commit: HIVE-13020: Hive Metastore and HiveServer2 to Zookeeper fails with IBM JDK (Greg Senia reviewed by Thejas Nair)

2017-03-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 1be6f1514 -> 66fd257c6


HIVE-13020: Hive Metastore and HiveServer2 to Zookeeper fails with IBM JDK 
(Greg Senia reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66fd257c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66fd257c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66fd257c

Branch: refs/heads/branch-1.2
Commit: 66fd257c6b1b2f1ecdf243045329197eeba7858a
Parents: 1be6f15
Author: Vaibhav Gumashta 
Authored: Fri Mar 17 13:13:07 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 17 13:13:07 2017 -0700

--
 .../org/apache/hadoop/hive/shims/Utils.java | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/66fd257c/shims/common/src/main/java/org/apache/hadoop/hive/shims/Utils.java
--
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/Utils.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/shims/Utils.java
index b898343..ca2bcf1 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/Utils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/Utils.java
@@ -39,6 +39,9 @@ import org.apache.zookeeper.client.ZooKeeperSaslClient;
 
 public class Utils {
 
+  private static final boolean IBM_JAVA = System.getProperty("java.vendor")
+      .contains("IBM");
+
   public static UserGroupInformation getUGI() throws LoginException, IOException {
     String doAs = System.getenv("HADOOP_USER_NAME");
     if(doAs != null && doAs.length() > 0) {
@@ -143,6 +146,8 @@ public class Utils {
    */
   private static class JaasConfiguration extends javax.security.auth.login.Configuration {
     // Current installed Configuration
+    private static final boolean IBM_JAVA = System.getProperty("java.vendor")
+        .contains("IBM");
     private final javax.security.auth.login.Configuration baseConfig =
         javax.security.auth.login.Configuration.getConfiguration();
     private final String loginContextName;
@@ -159,11 +164,16 @@ public class Utils {
     public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
       if (loginContextName.equals(appName)) {
         Map<String, String> krbOptions = new HashMap<String, String>();
-        krbOptions.put("doNotPrompt", "true");
-        krbOptions.put("storeKey", "true");
-        krbOptions.put("useKeyTab", "true");
-        krbOptions.put("principal", principal);
-        krbOptions.put("keyTab", keyTabFile);
+        if (IBM_JAVA) {
+          krbOptions.put("credsType", "both");
+          krbOptions.put("useKeytab", keyTabFile);
+        } else {
+          krbOptions.put("doNotPrompt", "true");
+          krbOptions.put("storeKey", "true");
+          krbOptions.put("useKeyTab", "true");
+          krbOptions.put("keyTab", keyTabFile);
+        }
+        krbOptions.put("principal", principal);
         krbOptions.put("refreshKrb5Config", "true");
         AppConfigurationEntry hiveZooKeeperClientEntry = new AppConfigurationEntry(
             KerberosUtil.getKrb5LoginModuleName(), LoginModuleControlFlag.REQUIRED, krbOptions);



hive git commit: HIVE-14210: ExecDriver should call jobclient.close() to trigger cleanup (Thomas Friedrich reviewed by Sergey Shelukhin)

2017-03-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/branch-1.2 643e9c0f0 -> 1be6f1514


HIVE-14210: ExecDriver should call jobclient.close() to trigger cleanup (Thomas 
Friedrich reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1be6f151
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1be6f151
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1be6f151

Branch: refs/heads/branch-1.2
Commit: 1be6f15142e9c71b27391af259e93e56cfc21949
Parents: 643e9c0
Author: Vaibhav Gumashta 
Authored: Fri Mar 17 12:52:30 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Fri Mar 17 12:52:30 2017 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1be6f151/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index b019c31..936a103 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -211,6 +211,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, HadoopJobExecHook {
 Context ctx = driverContext.getCtx();
 boolean ctxCreated = false;
 Path emptyScratchDir;
+JobClient jc = null;
 
 MapWork mWork = work.getMapWork();
 ReduceWork rWork = work.getReduceWork();
@@ -398,7 +399,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, HadoopJobExecHook {
 HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
   }
   LOG.error(job.get("mapreduce.framework.name"));
-  JobClient jc = new JobClient(job);
+  jc = new JobClient(job);
   // make this client wait if job tracker is not behaving well.
   Throttle.checkJobTracker(job, LOG);
 
@@ -465,6 +466,9 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, HadoopJobExecHook {
       HadoopJobExecHelper.runningJobs.remove(rj);
       jobID = rj.getID().toString();
     }
+    if (jc != null) {
+      jc.close();
+    }
   } catch (Exception e) {
   }
 }

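A side note on the change above: jc.close() is invoked inside the try block, so an exception thrown during submission or monitoring still skips the cleanup. A more defensive shape, sketched here outside the real ExecDriver (the surrounding submission logic is omitted), closes the client in a finally block:

// Sketch: close the JobClient even when submission or monitoring throws.
import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class JobClientCloseSketch {
  static void runJob(JobConf job) throws IOException {
    JobClient jc = null;
    try {
      jc = new JobClient(job);
      // ... submit the job and wait for completion here ...
    } finally {
      if (jc != null) {
        jc.close(); // runs on both the success and the failure path
      }
    }
  }
}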


hive git commit: HIVE-16115: Stop printing progress info from operation logs with beeline progress bar (Anishek Agarwal reviewed by Vaibhav Gumashta)

2017-03-13 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 37ed5aa5b -> ba4f6e7b1


HIVE-16115: Stop printing progress info from operation logs with beeline 
progress bar (Anishek Agarwal reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ba4f6e7b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ba4f6e7b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ba4f6e7b

Branch: refs/heads/master
Commit: ba4f6e7b193ef701ee426fc2b271350c05d4fb5c
Parents: 37ed5aa
Author: Vaibhav Gumashta 
Authored: Mon Mar 13 00:41:02 2017 -0700
Committer: Vaibhav Gumashta 
Committed: Mon Mar 13 00:41:02 2017 -0700

--
 itests/hive-unit/pom.xml|  12 -
 .../hive/beeline/TestBeeLineWithArgs.java   | 313 ---
 .../ql/exec/tez/monitoring/RenderStrategy.java  |  22 +-
 .../org/apache/hive/service/cli/CLIService.java |   8 +-
 4 files changed, 218 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ba4f6e7b/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 6a190d1..789192b 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -138,12 +138,6 @@
 
 
   org.apache.hive
-  hive-jdbc
-  ${project.version}
-  test
-  
-
-  org.apache.hive
   hive-metastore
   ${project.version}
   tests
@@ -382,12 +376,6 @@
   true
 
 
-  org.apache.hadoop
-  hadoop-yarn-api
-  ${hadoop.version}
-  test
-
-
   org.apache.curator
   curator-test
   ${curator.version}

http://git-wip-us.apache.org/repos/asf/hive/blob/ba4f6e7b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 42ef280..650c4b7 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -20,6 +20,7 @@ package org.apache.hive.beeline;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
@@ -34,12 +35,17 @@ import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -76,6 +82,7 @@ public class TestBeeLineWithArgs {
 argList.add(userName);
 return argList;
   }
+
   /**
* Start up a local Hive Server 2 for these tests
*/
@@ -119,7 +126,7 @@ public class TestBeeLineWithArgs {
 try {
   stmt.execute("drop table " + tableName);
 } catch (Exception ex) {
-  fail(ex.toString());
+  fail(ex.toString() + " " + ExceptionUtils.getStackTrace(ex));
 }
 
 // create table
@@ -150,7 +157,8 @@ public class TestBeeLineWithArgs {
* @return The stderr and stdout from running the script
* @throws Throwable
*/
-  private String testCommandLineScript(List<String> argList, InputStream inputStream, OutStream streamType)
+  private static String testCommandLineScript(List<String> argList, InputStream inputStream,
+      OutStream streamType)
       throws Throwable {
 BeeLine beeLine = new BeeLine();
 ByteArrayOutputStream os = new ByteArrayOutputStream();
@@ -177,7 +185,7 @@ public class TestBeeLineWithArgs {
* Attempt to execute a simple script file with the -f and -i option to
* BeeLine to test for presence of an expected pattern in the output (stdout
* or stderr), fail if not found. Print PASSED or FAILED
-   * 
+   *
* @param expectedRegex
*  Text to look for in command output (stdout)
* @param shouldMatch
@@ -185,9 +193,11 @@ public class TestBeeLineWithArgs {
* @throws Exception
*   on command execution error
*/
-  private void testScriptFile(String scriptText, String expectedRegex,
-      boolean shouldMatch, List<String> argList) throws

hive git commit: HIVE-16172: Switch to a fairness lock to synchronize HS2 thrift client (Tao Li reviewed by Vaibhav Gumashta)

2017-03-10 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 9cccb9556 -> 7071db4b4


HIVE-16172: Switch to a fairness lock to synchronize HS2 thrift client (Tao Li 
reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7071db4b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7071db4b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7071db4b

Branch: refs/heads/master
Commit: 7071db4b466e76dc3d19d5db763ec4116d2638d3
Parents: 9cccb95
Author: Vaibhav Gumashta 
Authored: Fri Mar 10 09:13:27 2017 -0800
Committer: Vaibhav Gumashta 
Committed: Fri Mar 10 09:13:27 2017 -0800

--
 jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7071db4b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index ed899c6..1695c5d 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -106,6 +106,7 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
 
 /**
  * HiveConnection.
@@ -1491,6 +1492,7 @@ public class HiveConnection implements java.sql.Connection {
 
   private static class SynchronizedHandler implements InvocationHandler {
     private final TCLIService.Iface client;
+    private final ReentrantLock lock = new ReentrantLock(true);
 
     SynchronizedHandler(TCLIService.Iface client) {
       this.client = client;
@@ -1500,9 +1502,8 @@ public class HiveConnection implements java.sql.Connection {
     public Object invoke(Object proxy, Method method, Object [] args)
         throws Throwable {
       try {
-        synchronized (client) {
-          return method.invoke(client, args);
-        }
+        lock.lock();
+        return method.invoke(client, args);
       } catch (InvocationTargetException e) {
         // all IFace APIs throw TException
         if (e.getTargetException() instanceof TException) {
@@ -1514,6 +1515,8 @@ public class HiveConnection implements java.sql.Connection {
         }
       } catch (Exception e) {
         throw new TException("Error in calling method " + method.getName(), e);
+      } finally {
+        lock.unlock();
+      }
     }
   }


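For readers unfamiliar with how SynchronizedHandler is wired in: the handler wraps the raw Thrift client via java.lang.reflect.Proxy, so every RPC passes through invoke(). With ReentrantLock(true), waiting JDBC threads acquire the client in roughly FIFO order instead of risking starvation under an unfair monitor. A generic sketch of the pattern (the Client interface and method names are invented for illustration):

// Sketch: serialize calls to a shared client through a proxy guarded by a fair lock.
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.concurrent.locks.ReentrantLock;

public class FairProxySketch {
  interface Client { String call(String arg); }

  static Client synchronizedClient(final Client raw) {
    final ReentrantLock lock = new ReentrantLock(true); // true => FIFO handoff under contention
    return (Client) Proxy.newProxyInstance(
        Client.class.getClassLoader(),
        new Class<?>[] { Client.class },
        new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
            lock.lock();
            try {
              return method.invoke(raw, args); // one RPC at a time, fair ordering
            } finally {
              lock.unlock();
            }
          }
        });
  }
}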

hive git commit: HIVE-16106: Upgrade to Datanucleus 4.2.12 (Vaibhav Gumashta reviewed by Daniel Dai)

2017-03-06 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master de2a5d63a -> 8841bbf13


HIVE-16106: Upgrade to Datanucleus 4.2.12 (Vaibhav Gumashta reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8841bbf1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8841bbf1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8841bbf1

Branch: refs/heads/master
Commit: 8841bbf13e04351019f67410ee0e6bf1490a72ee
Parents: de2a5d6
Author: Vaibhav Gumashta 
Authored: Mon Mar 6 14:34:24 2017 -0800
Committer: Vaibhav Gumashta 
Committed: Mon Mar 6 14:34:24 2017 -0800

--
 pom.xml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8841bbf1/pom.xml
--
diff --git a/pom.xml b/pom.xml
index bfa66a1..d8b4cf3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -117,9 +117,9 @@
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
     <calcite.version>1.10.0</calcite.version>
-    <datanucleus-api-jdo.version>4.2.1</datanucleus-api-jdo.version>
-    <datanucleus-core.version>4.1.6</datanucleus-core.version>
-    <datanucleus-rdbms.version>4.1.7</datanucleus-rdbms.version>
+    <datanucleus-api-jdo.version>4.2.4</datanucleus-api-jdo.version>
+    <datanucleus-core.version>4.1.17</datanucleus-core.version>
+    <datanucleus-rdbms.version>4.1.19</datanucleus-rdbms.version>
     <datanucleus-jdo.version>3.2.0-m3</datanucleus-jdo.version>
     <commons-cli.version>1.2</commons-cli.version>
     <commons-codec.version>1.4</commons-codec.version>



hive git commit: HIVE-14901: HiveServer2: Use user supplied fetch size to determine #rows serialized in tasks (Norris Lee reviewed by Vaibhav Gumashta)

2017-03-06 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 22af0eff0 -> de2a5d63a


HIVE-14901: HiveServer2: Use user supplied fetch size to determine #rows 
serialized in tasks (Norris Lee reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/de2a5d63
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/de2a5d63
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/de2a5d63

Branch: refs/heads/master
Commit: de2a5d63af78ffb6ef97062b06b9f3eb9557e361
Parents: 22af0ef
Author: Vaibhav Gumashta 
Authored: Mon Mar 6 14:26:24 2017 -0800
Committer: Vaibhav Gumashta 
Committed: Mon Mar 6 14:26:54 2017 -0800

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 +-
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   | 30 
 .../org/apache/hive/jdbc/HiveConnection.java| 10 +++
 .../serde2/thrift/ThriftJDBCBinarySerDe.java|  4 ++-
 .../org/apache/hive/service/cli/CLIService.java |  7 -
 .../hive/service/cli/CLIServiceClient.java  |  4 +--
 .../hive/service/cli/session/HiveSession.java   | 10 ++-
 .../service/cli/session/HiveSessionImpl.java| 24 
 .../service/cli/thrift/ThriftCLIService.java| 19 +++--
 9 files changed, 103 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/de2a5d63/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index a8aaa5c..676c527 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2619,7 +2619,7 @@ public class HiveConf extends Configuration {
     // TODO: Make use of this config to configure fetch size
     HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE("hive.server2.thrift.resultset.max.fetch.size",
         1, "Max number of rows sent in one Fetch RPC call by the server to the client."),
-    HIVE_SERVER2_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.resultset.default.fetch.size", 1,
+    HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.thrift.resultset.default.fetch.size", 1000,
         "The number of rows sent in one Fetch RPC call by the server to the client, if not\n" +
         "specified by the client."),
     HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled", false,
@@ -4237,6 +4237,7 @@ public class HiveConf extends Configuration {
         "hive\\.parquet\\..*",
         "hive\\.ppd\\..*",
         "hive\\.prewarm\\..*",
+        "hive\\.server2\\.thrift\\.resultset\\.default\\.fetch\\.size",
         "hive\\.server2\\.proxy\\.user",
         "hive\\.skewjoin\\..*",
         "hive\\.smbjoin\\..*",

http://git-wip-us.apache.org/repos/asf/hive/blob/de2a5d63/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 3d4057b..afe23f8 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -1321,4 +1321,34 @@ public class TestJdbcWithMiniHS2 {
   fs.delete(testPath, true);
 }
   }
+
+  @Test
+  public void testFetchSize() throws Exception {
+    // Test setting fetch size below max
+    Connection fsConn = getConnection(miniHS2.getJdbcURL("default", "fetchSize=50", ""),
+        System.getProperty("user.name"), "bar");
+    Statement stmt = fsConn.createStatement();
+    stmt.execute("set hive.server2.thrift.resultset.serialize.in.tasks=true");
+    int fetchSize = stmt.getFetchSize();
+    assertEquals(50, fetchSize);
+    stmt.close();
+    fsConn.close();
+    // Test setting fetch size above max
+    fsConn = getConnection(
+        miniHS2.getJdbcURL(
+            "default",
+            "fetchSize=" + (miniHS2.getHiveConf().getIntVar(
+                HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE) + 1),
+            ""),
+        System.getProperty("user.name"), "bar");
+    stmt = fsConn.createStatement();
+    stmt.execute("set hive.server2.thrift.resultset.serialize.in.tasks=true");
+    fetchSize = stmt.getFetchSize();
+    assertEquals(
+        miniHS2.getHiveConf().getIntVar(
+            HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE),
+        fetchSize);
+    stmt.close();
+    fsConn.close();
+  }
 }

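For client authors, the user-facing knob added here is the fetchSize parameter in the JDBC URL, which the server clamps to hive.server2.thrift.resultset.max.fetch.size, as the test above verifies. A minimal usage sketch (host, credentials, and table name are placeholders):

// Sketch: ask HiveServer2 to serialize up to 500 rows per Fetch RPC.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FetchSizeSketch {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:hive2://hs2-host:10000/default;fetchSize=500";
    try (Connection conn = DriverManager.getConnection(url, "user", "");
         Statement stmt = conn.createStatement()) {
      System.out.println("negotiated fetch size: " + stmt.getFetchSize());
      try (ResultSet rs = stmt.executeQuery("SELECT * FROM some_table")) {
        while (rs.next()) {
          // consume rows; each Fetch RPC carries at most the negotiated batch
        }
      }
    }
  }
}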

[3/3] hive git commit: HIVE-15730: JDBC should use SQLFeatureNotSupportedException where appropriate instead of SQLException (Sankar Hariappan reviewed by Vaibhav Gumashta)

2017-02-09 Thread vgumashta
HIVE-15730: JDBC should use SQLFeatureNotSupportedException where appropriate 
instead of SQLException (Sankar Hariappan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2429bb28
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2429bb28
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2429bb28

Branch: refs/heads/master
Commit: 2429bb28ac9640cb7693c2394e2f10cdefdfabc0
Parents: fc08e3b
Author: Vaibhav Gumashta 
Authored: Thu Feb 9 14:48:54 2017 -0800
Committer: Vaibhav Gumashta 
Committed: Thu Feb 9 14:48:54 2017 -0800

--
 .../org/apache/hive/jdbc/HiveBaseResultSet.java | 297 ++---
 .../apache/hive/jdbc/HiveCallableStatement.java | 421 ++-
 .../org/apache/hive/jdbc/HiveConnection.java|  63 +--
 .../org/apache/hive/jdbc/HiveDataSource.java|  12 +-
 .../apache/hive/jdbc/HiveDatabaseMetaData.java  | 215 +-
 .../apache/hive/jdbc/HivePreparedStatement.java |  73 ++--
 .../apache/hive/jdbc/HiveQueryResultSet.java|   7 +-
 .../apache/hive/jdbc/HiveResultSetMetaData.java |  19 +-
 .../org/apache/hive/jdbc/HiveStatement.java |  32 +-
 .../org/apache/hive/jdbc/HiveStatementTest.java |  14 +-
 10 files changed, 585 insertions(+), 568 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2429bb28/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
index 6d4b2b1..ade1900 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
@@ -35,6 +35,7 @@ import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.RowId;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.sql.SQLWarning;
 import java.sql.SQLXML;
 import java.sql.Statement;
@@ -67,23 +68,23 @@ public abstract class HiveBaseResultSet implements ResultSet {
   private TableSchema schema;
 
   public boolean absolute(int row) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public void afterLast() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public void beforeFirst() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public void cancelRowUpdates() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public void deleteRow() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public int findColumn(String columnName) throws SQLException {
@@ -106,23 +107,23 @@ public abstract class HiveBaseResultSet implements ResultSet {
   }
 
   public boolean first() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public Array getArray(int i) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public Array getArray(String colName) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public InputStream getAsciiStream(int columnIndex) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public InputStream getAsciiStream(String columnName) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
@@ -177,11 +178,11 @@ public abstract class HiveBaseResultSet implements ResultSet {
   }
 
   public Blob getBlob(int i) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public Blob getBlob(String colName) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean 

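A practical note on this change for JDBC client authors: the narrower exception type lets callers distinguish an unsupported feature from a genuine failure. A small sketch of the pattern this enables (the class and method names are illustrative, not from the patch):

// Sketch: feature-detect instead of treating every SQLException as fatal.
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;

public class FeatureDetectSketch {
  static boolean tryAbsolute(ResultSet rs, int row) throws SQLException {
    try {
      return rs.absolute(row);
    } catch (SQLFeatureNotSupportedException e) {
      // Expected from the Hive driver after this change: fall back to
      // forward-only iteration instead of failing the whole operation.
      return false;
    }
  }
}
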
[2/3] hive git commit: HIVE-15730: JDBC should use SQLFeatureNotSupportedException where appropriate instead of SQLException (Sankar Hariappan reviewed by Vaibhav Gumashta)

2017-02-09 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/2429bb28/jdbc/src/java/org/apache/hive/jdbc/HiveCallableStatement.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveCallableStatement.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveCallableStatement.java
index d04f786..8947dc8 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveCallableStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveCallableStatement.java
@@ -34,6 +34,7 @@ import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.RowId;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.sql.SQLWarning;
 import java.sql.SQLXML;
 import java.sql.Time;
@@ -63,7 +64,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public Array getArray(int i) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -74,7 +75,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public Array getArray(String parameterName) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -85,7 +86,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -96,7 +97,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public BigDecimal getBigDecimal(String parameterName) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -107,7 +108,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public BigDecimal getBigDecimal(int parameterIndex, int scale) throws 
SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -118,7 +119,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public Blob getBlob(int i) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -129,7 +130,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public Blob getBlob(String parameterName) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -140,7 +141,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public boolean getBoolean(int parameterIndex) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -151,7 +152,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public boolean getBoolean(String parameterName) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -162,7 +163,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public byte getByte(int parameterIndex) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -173,7 +174,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public byte getByte(String parameterName) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -184,7 +185,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   public byte[] getBytes(int parameterIndex) throws SQLException {
 // TODO Auto-generated method stub
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /*
@@ -195,7 +196,7 @@ public class HiveCallableStatement implements 
java.sql.CallableStatement {
 
   

[1/3] hive git commit: HIVE-15730: JDBC should use SQLFeatureNotSupportedException where appropriate instead of SQLException (Sankar Hariappan reviewed by Vaibhav Gumashta)

2017-02-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master fc08e3be5 -> 2429bb28a


http://git-wip-us.apache.org/repos/asf/hive/blob/2429bb28/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
index fa984f4..e3bdb68 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
@@ -23,6 +23,7 @@ import java.sql.DatabaseMetaData;
 import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.jar.Attributes;
@@ -85,7 +86,7 @@ public class HiveDatabaseMetaData implements DatabaseMetaData 
{
   }
 
   public boolean allProceduresAreCallable() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean allTablesAreSelectable() throws SQLException {
@@ -93,33 +94,33 @@ public class HiveDatabaseMetaData implements 
DatabaseMetaData {
   }
 
   public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean deletesAreDetected(int type) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public ResultSet getAttributes(String catalog, String schemaPattern,
   String typeNamePattern, String attributeNamePattern) throws SQLException 
{
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public ResultSet getBestRowIdentifier(String catalog, String schema,
   String table, int scope, boolean nullable) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public String getCatalogSeparator() throws SQLException {
@@ -148,23 +149,23 @@ public class HiveDatabaseMetaData implements 
DatabaseMetaData {
   }
 
   public ResultSet getClientInfoProperties() throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public ResultSet getColumnPrivileges(String catalog, String schema,
   String table, String columnNamePattern) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public ResultSet getPseudoColumns(String catalog, String schemaPattern,
   String tableNamePattern, String columnNamePattern) throws SQLException {
 // JDK 1.7
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public boolean generatedKeyAlwaysReturned() throws SQLException {
 // JDK 1.7
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   /**
@@ -327,7 +328,7 @@ public class HiveDatabaseMetaData implements 
DatabaseMetaData {
 
   public ResultSet getExportedKeys(String catalog, String schema, String table)
   throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public String getExtraNameCharacters() throws SQLException {
@@ -337,7 +338,7 @@ public class HiveDatabaseMetaData implements 
DatabaseMetaData {
 
   public ResultSet getFunctionColumns(String arg0, String arg1, String arg2,
   String arg3) throws SQLException {
-throw new SQLException("Method not supported");
+throw new SQLFeatureNotSupportedException("Method not supported");
   }
 
   public ResultSet getFunctions(String catalogName, String schemaPattern, 
String functionNamePattern)
@@ -428,15 +429,15 @@ public class HiveDatabaseMetaData implements 
DatabaseMetaData {
   }
 
   public int 

hive git commit: HIVE-15648: Hive throws compilation error due to powermock.version not being present in root pom (Vaibhav Gumashta reviewed by Thejas Nair)

2017-01-17 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master f2dcdaa47 -> ec82b84f3


HIVE-15648: Hive throws compilation error due to powermock.version not being 
present in root pom (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ec82b84f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ec82b84f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ec82b84f

Branch: refs/heads/master
Commit: ec82b84f309119121dc55aa84c1b9b70e7a0c334
Parents: f2dcdaa
Author: Vaibhav Gumashta 
Authored: Wed Jan 18 00:41:31 2017 +0530
Committer: Vaibhav Gumashta 
Committed: Wed Jan 18 00:41:31 2017 +0530

--
 beeline/pom.xml | 1 -
 pom.xml | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ec82b84f/beeline/pom.xml
--
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 58ca92e..9d30a9e 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -29,7 +29,6 @@
 
   
 ..
-    <powermock.version>1.6.6</powermock.version>
   
 
   

http://git-wip-us.apache.org/repos/asf/hive/blob/ec82b84f/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 5d5e172..b499cd2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -202,6 +202,7 @@
 3.0.0
 0.6.0
 2.2.4
+    <powermock.version>1.6.6</powermock.version>
   
 
   



hive git commit: HIVE-15366: REPL LOAD & DUMP support for incremental INSERT events (Vaibhav Gumashta reviewed by Sushanth Sowmyan)

2017-01-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 01e691c5c -> 2f501a8a0


HIVE-15366: REPL LOAD & DUMP support for incremental INSERT events (Vaibhav 
Gumashta reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2f501a8a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2f501a8a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2f501a8a

Branch: refs/heads/master
Commit: 2f501a8a024bf25701f97f4621ceda9b080be95d
Parents: 01e691c
Author: Vaibhav Gumashta 
Authored: Mon Jan 9 13:05:47 2017 -0800
Committer: Vaibhav Gumashta 
Committed: Mon Jan 9 13:05:47 2017 -0800

--
 .../listener/TestDbNotificationListener.java| 27 ++
 .../hive/ql/TestReplicationScenarios.java   | 92 
 metastore/if/hive_metastore.thrift  |  4 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  4 +-
 .../metastore/api/InsertEventRequestData.java   | 40 -
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  2 +-
 .../hive/metastore/events/InsertEvent.java  |  5 +-
 .../hive/metastore/messaging/InsertMessage.java |  4 +-
 .../metastore/messaging/MessageFactory.java |  3 +-
 .../messaging/json/JSONInsertMessage.java   | 27 +++---
 .../messaging/json/JSONMessageFactory.java  |  2 +-
 .../hadoop/hive/ql/exec/ReplCopyTask.java   | 14 +--
 .../apache/hadoop/hive/ql/metadata/Hive.java|  9 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   | 30 +--
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |  4 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |  4 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 47 +-
 17 files changed, 230 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2f501a8a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
--
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index 39356ae..4eabb24 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -913,7 +913,7 @@ public class TestDbNotificationListener {
 assertEquals(defaultDbName, event.getDbName());
 assertEquals(tblName, event.getTableName());
 // Parse the message field
-verifyInsertJSON(event, defaultDbName, tblName, false);
+verifyInsertJSON(event, defaultDbName, tblName);
   }
 
   @Test
@@ -967,7 +967,7 @@ public class TestDbNotificationListener {
 assertEquals(defaultDbName, event.getDbName());
 assertEquals(tblName, event.getTableName());
 // Parse the message field
-verifyInsertJSON(event, defaultDbName, tblName, false);
+verifyInsertJSON(event, defaultDbName, tblName);
 ObjectNode jsonTree = JSONMessageFactory.getJsonTree(event);
 LinkedHashMap<String, String> partKeyValsFromNotif =
 JSONMessageFactory.getAsMap((ObjectNode) jsonTree.get("partKeyVals"),
@@ -1057,7 +1057,7 @@ public class TestDbNotificationListener {
 assertEquals(firstEventId + 3, event.getEventId());
 assertEquals(EventType.INSERT.toString(), event.getEventType());
 // Parse the message field
-verifyInsertJSON(event, defaultDbName, tblName, true);
+verifyInsertJSON(event, defaultDbName, tblName);
 
 event = rsp.getEvents().get(4);
 assertEquals(firstEventId + 5, event.getEventId());
@@ -1090,7 +1090,7 @@ public class TestDbNotificationListener {
 assertEquals(firstEventId + 3, event.getEventId());
 assertEquals(EventType.INSERT.toString(), event.getEventType());
 // Parse the message field
-verifyInsertJSON(event, null, sourceTblName, true);
+verifyInsertJSON(event, null, sourceTblName);
 
 event = rsp.getEvents().get(4);
 assertEquals(firstEventId + 5, event.getEventId());
@@ -1165,13 +1165,13 @@ public class TestDbNotificationListener {
 assertEquals(firstEventId + 4, event.getEventId());
 assertEquals(EventType.INSERT.toString(), event.getEventType());
 // Parse the message field
-verifyInsertJSON(event, null, tblName, true);
+verifyInsertJSON(event, null, tblName);
 
 event = rsp.getEvents().get(6);
 assertEquals(firstEventId + 7, event.getEventId());
 assertEquals(EventType.INSERT.toString(), event.getEventType());
 // Parse the message field
-verifyInsertJSON(event, null, tblName, true);
+verifyInsertJSON(event, null, tblName);
 
 event = 

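The edits above drop the old boolean flag from verifyInsertJSON; for readers
without the full test file, here is the shape of the checks such a verifier
performs. This is a sketch using only accessors visible in this diff; the
"table" field name inside the message tree is an assumption about
JSONInsertMessage's layout, not shown here:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory;
import org.codehaus.jackson.node.ObjectNode;

public class InsertEventChecks {
  // Hypothetical stand-in for verifyInsertJSON(event, dbName, tblName).
  static void verifyInsert(NotificationEvent event, String db, String table) throws Exception {
    assertEquals(EventType.INSERT.toString(), event.getEventType());
    if (db != null) {
      assertEquals(db, event.getDbName());
    }
    assertEquals(table, event.getTableName());
    // getJsonTree(event) is called the same way a few lines above.
    ObjectNode jsonTree = JSONMessageFactory.getJsonTree(event);
    assertEquals(table, jsonTree.get("table").getTextValue()); // field name assumed
  }
}
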
hive git commit: HIVE-15466: REPL LOAD & DUMP support for incremental DROP_TABLE/DROP_PTN (Sushanth Sowmyan reviewed by Vaibhav Gumashta)

2016-12-20 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 5efd20c7f -> ab9b21920


HIVE-15466: REPL LOAD & DUMP support for incremental DROP_TABLE/DROP_PTN 
(Sushanth Sowmyan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab9b2192
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab9b2192
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab9b2192

Branch: refs/heads/master
Commit: ab9b21920c0b624822850e32197e7d6575fd5fb4
Parents: 5efd20c
Author: Vaibhav Gumashta 
Authored: Tue Dec 20 14:47:55 2016 -0800
Committer: Vaibhav Gumashta 
Committed: Tue Dec 20 14:47:55 2016 -0800

--
 .../hive/ql/TestReplicationScenarios.java   | 116 +++
 .../messaging/json/JSONMessageFactory.java  |   6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |   2 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 347 +--
 4 files changed, 435 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ab9b2192/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index 3ac5ba7..d2696be 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -21,11 +21,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.util.Shell;
+import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -41,6 +45,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 
 public class TestReplicationScenarios {
 
@@ -54,6 +60,7 @@ public class TestReplicationScenarios {
   static boolean useExternalMS = false;
   static int msPort;
   static Driver driver;
+  static HiveMetaStoreClient metaStoreClient;
 
   protected static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationScenarios.class);
  private ArrayList<String> lastResults;
@@ -91,6 +98,7 @@ public class TestReplicationScenarios {
 
 driver = new Driver(hconf);
 SessionState.start(new CliSessionState(hconf));
+metaStoreClient = new HiveMetaStoreClient(hconf);
   }
 
   @AfterClass
@@ -285,6 +293,114 @@ public class TestReplicationScenarios {
 verifyResults(ptn_data_1);
 run("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2");
 verifyResults(ptn_data_2);
+
+  }
+
+  @Test
+  public void testDrops() throws IOException {
+
+String testName = "drops";
+LOG.info("Testing "+testName);
+String dbName = testName + "_" + tid;
+
+run("CREATE DATABASE " + dbName);
+run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
+run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) 
STORED AS TEXTFILE");
+run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b 
string) STORED AS TEXTFILE");
+
+String[] unptn_data = new String[]{ "eleven" , "twelve" };
+String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"};
+String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
+String[] empty = new String[]{};
+
+String unptn_locn = new Path(TEST_PATH , testName + 
"_unptn").toUri().getPath();
+String ptn_locn_1 = new Path(TEST_PATH , testName + 
"_ptn1").toUri().getPath();
+String ptn_locn_2 = new Path(TEST_PATH , testName + 
"_ptn2").toUri().getPath();
+
+createTestDataFile(unptn_locn, unptn_data);
+createTestDataFile(ptn_locn_1, ptn_data_1);
+createTestDataFile(ptn_locn_2, ptn_data_2);
+
+run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + 
dbName + ".unptned");
+run("SELECT * from " + dbName + ".unptned");
+verifyResults(unptn_data);
+run("LOAD DATA LOCAL INPATH '" 

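The hunk is truncated here; the remainder of testDrops() follows the
bootstrap-then-incremental pattern used throughout this test class. A sketch
of that flow's shape, with the getResult helper assumed from the rest of the
class rather than shown in this hunk:

// Bootstrap: dump the source db and load it into a replica.
run("REPL DUMP " + dbName);
String replDumpLocn = getResult(0, 0);  // column 0: dump location
String replDumpId = getResult(0, 1);    // column 1: last repl/event id
run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");

// Drops on the source become DROP_TABLE/DROP_PTN events.
run("DROP TABLE " + dbName + ".unptned");
run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')");

// Incremental: dump only events newer than the bootstrap id, then load.
run("REPL DUMP " + dbName + " FROM " + replDumpId);
String incrementalDumpLocn = getResult(0, 0);
run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
// The HiveMetaStoreClient added in this patch can then assert the replica
// no longer has the dropped table/partition.
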
[1/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 12f5550ca -> bbd99ed60


http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 7927a46..2f1c3cf 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -11144,15 +11144,18 @@ class InsertEventRequestData:
   """
   Attributes:
- filesAdded
+   - filesAddedChecksum
   """
 
   thrift_spec = (
 None, # 0
 (1, TType.LIST, 'filesAdded', (TType.STRING,None), None, ), # 1
+(2, TType.LIST, 'filesAddedChecksum', (TType.STRING,None), None, ), # 2
   )
 
-  def __init__(self, filesAdded=None,):
+  def __init__(self, filesAdded=None, filesAddedChecksum=None,):
 self.filesAdded = filesAdded
+self.filesAddedChecksum = filesAddedChecksum
 
   def read(self, iprot):
 if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
@@ -11173,6 +11176,16 @@ class InsertEventRequestData:
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
+  elif fid == 2:
+if ftype == TType.LIST:
+  self.filesAddedChecksum = []
+  (_etype501, _size498) = iprot.readListBegin()
+  for _i502 in xrange(_size498):
+_elem503 = iprot.readString()
+self.filesAddedChecksum.append(_elem503)
+  iprot.readListEnd()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -11186,8 +11199,15 @@ class InsertEventRequestData:
 if self.filesAdded is not None:
   oprot.writeFieldBegin('filesAdded', TType.LIST, 1)
   oprot.writeListBegin(TType.STRING, len(self.filesAdded))
-  for iter498 in self.filesAdded:
-oprot.writeString(iter498)
+  for iter504 in self.filesAdded:
+oprot.writeString(iter504)
+  oprot.writeListEnd()
+  oprot.writeFieldEnd()
+if self.filesAddedChecksum is not None:
+  oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 2)
+  oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum))
+  for iter505 in self.filesAddedChecksum:
+oprot.writeString(iter505)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -11202,6 +11222,7 @@ class InsertEventRequestData:
   def __hash__(self):
 value = 17
 value = (value * 31) ^ hash(self.filesAdded)
+value = (value * 31) ^ hash(self.filesAddedChecksum)
 return value
 
   def __repr__(self):
@@ -11340,10 +11361,10 @@ class FireEventRequest:
   elif fid == 5:
 if ftype == TType.LIST:
   self.partitionVals = []
-  (_etype502, _size499) = iprot.readListBegin()
-  for _i503 in xrange(_size499):
-_elem504 = iprot.readString()
-self.partitionVals.append(_elem504)
+  (_etype509, _size506) = iprot.readListBegin()
+  for _i510 in xrange(_size506):
+_elem511 = iprot.readString()
+self.partitionVals.append(_elem511)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -11376,8 +11397,8 @@ class FireEventRequest:
 if self.partitionVals is not None:
   oprot.writeFieldBegin('partitionVals', TType.LIST, 5)
   oprot.writeListBegin(TType.STRING, len(self.partitionVals))
-  for iter505 in self.partitionVals:
-oprot.writeString(iter505)
+  for iter512 in self.partitionVals:
+oprot.writeString(iter512)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -11564,12 +11585,12 @@ class GetFileMetadataByExprResult:
   if fid == 1:
 if ftype == TType.MAP:
   self.metadata = {}
-  (_ktype507, _vtype508, _size506 ) = iprot.readMapBegin()
-  for _i510 in xrange(_size506):
-_key511 = iprot.readI64()
-_val512 = MetadataPpdResult()
-_val512.read(iprot)
-self.metadata[_key511] = _val512
+  (_ktype514, _vtype515, _size513 ) = iprot.readMapBegin()
+  for _i517 in xrange(_size513):
+_key518 = iprot.readI64()
+_val519 = MetadataPpdResult()
+_val519.read(iprot)
+self.metadata[_key518] = _val519
   iprot.readMapEnd()
 else:
   iprot.skip(ftype)
@@ -11591,9 +11612,9 @@ class GetFileMetadataByExprResult:
 if self.metadata is not None:
   oprot.writeFieldBegin('metadata', TType.MAP, 1)
   oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata))
-  for kiter513,viter514 in self.metadata.items():
-oprot.writeI64(kiter513)
-

[8/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
HIVE-15294: Capture additional metadata to replicate a simple insert at 
destination (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bbd99ed6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bbd99ed6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bbd99ed6

Branch: refs/heads/master
Commit: bbd99ed60e5708af3dc329b097d4b024f73041bd
Parents: 12f5550
Author: Vaibhav Gumashta 
Authored: Mon Dec 19 14:02:01 2016 -0800
Committer: Vaibhav Gumashta 
Committed: Mon Dec 19 14:02:01 2016 -0800

--
 .../listener/DbNotificationListener.java|   55 +-
 .../listener/TestDbNotificationListener.java|  955 +---
 metastore/if/hive_metastore.thrift  |4 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2054 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  821 ---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   13 +
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/ClientCapabilities.java  |   32 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java   |   32 +-
 .../api/GetFileMetadataByExprResult.java|   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java|   44 +-
 .../hive/metastore/api/GetTablesRequest.java|   32 +-
 .../hive/metastore/api/GetTablesResult.java |   36 +-
 .../metastore/api/InsertEventRequestData.java   |  181 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2220 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1250 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  291 ++-
 .../hive_metastore/ThriftHiveMetastore.py   |  842 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  185 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|   28 +-
 .../hive/metastore/events/InsertEvent.java  |   36 +-
 .../metastore/messaging/MessageFactory.java |   22 +-
 .../messaging/json/JSONInsertMessage.java   |   54 +-
 .../messaging/json/JSONMessageFactory.java  |   36 +
 .../apache/hadoop/hive/ql/metadata/Hive.java|   56 +-
 .../apache/hadoop/fs/ProxyLocalFileSystem.java  |  104 +-
 31 files changed, 5202 insertions(+), 4429 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
--
diff --git 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 119801f..8d29bfc 100644
--- 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
-import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -105,6 +104,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
* @param tableEvent table event.
* @throws org.apache.hadoop.hive.metastore.api.MetaException
*/
+  @Override
   public void onConfigChange(ConfigChangeEvent tableEvent) throws 
MetaException {
 String key = tableEvent.getKey();
 if 
(key.equals(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL.toString())) {
@@ -122,6 +122,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
* @param tableEvent table event.
* @throws MetaException
*/
+  @Override
   public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
 Table t = tableEvent.getTable();
 NotificationEvent event =
@@ -129,13 +130,14 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildCreateTableMessage(t).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-enqueue(event);
+process(event);
   }
 
   /**
* @param tableEvent table event.
* @throws MetaException
*/
+  @Override
   public void 

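At the API surface, the patch adds an optional filesAddedChecksum list to the
Thrift struct InsertEventRequestData, parallel to filesAdded. A caller-side
sketch of populating it when firing an insert event; the addTo*/set* methods
are the standard Thrift-generated ones, and the path, checksum, db, and table
values are placeholders:

InsertEventRequestData insertData = new InsertEventRequestData();
insertData.addToFilesAdded("hdfs://nn:8020/warehouse/t/000000_0");
// New: per-file checksums, aligned by index with filesAdded, so the
// replication destination can verify the files it copies.
insertData.addToFilesAddedChecksum("acbd18db4cc2f85cedef654fccc4a4d8");

FireEventRequestData data = new FireEventRequestData();
data.setInsertData(insertData);
FireEventRequest rqst = new FireEventRequest(true, data);
rqst.setDbName("default");
rqst.setTableName("t");
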
[3/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 0087205..9bfc2b2 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -11032,14 +11032,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size590 = 0;
-$_etype593 = 0;
-$xfer += $input->readListBegin($_etype593, $_size590);
-for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
+$_size597 = 0;
+$_etype600 = 0;
+$xfer += $input->readListBegin($_etype600, $_size597);
+for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
 {
-  $elem595 = null;
-  $xfer += $input->readString($elem595);
-  $this->success []= $elem595;
+  $elem602 = null;
+  $xfer += $input->readString($elem602);
+  $this->success []= $elem602;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11075,9 +11075,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter596)
+  foreach ($this->success as $iter603)
   {
-$xfer += $output->writeString($iter596);
+$xfer += $output->writeString($iter603);
   }
 }
 $output->writeListEnd();
@@ -11208,14 +11208,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size597 = 0;
-$_etype600 = 0;
-$xfer += $input->readListBegin($_etype600, $_size597);
-for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
+$_size604 = 0;
+$_etype607 = 0;
+$xfer += $input->readListBegin($_etype607, $_size604);
+for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
 {
-  $elem602 = null;
-  $xfer += $input->readString($elem602);
-  $this->success []= $elem602;
+  $elem609 = null;
+  $xfer += $input->readString($elem609);
+  $this->success []= $elem609;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11251,9 +11251,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter603)
+  foreach ($this->success as $iter610)
   {
-$xfer += $output->writeString($iter603);
+$xfer += $output->writeString($iter610);
   }
 }
 $output->writeListEnd();
@@ -12254,18 +12254,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size604 = 0;
-$_ktype605 = 0;
-$_vtype606 = 0;
-$xfer += $input->readMapBegin($_ktype605, $_vtype606, $_size604);
-for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
+$_size611 = 0;
+$_ktype612 = 0;
+$_vtype613 = 0;
+$xfer += $input->readMapBegin($_ktype612, $_vtype613, $_size611);
+for ($_i615 = 0; $_i615 < $_size611; ++$_i615)
 {
-  $key609 = '';
-  $val610 = new \metastore\Type();
-  $xfer += $input->readString($key609);
-  $val610 = new \metastore\Type();
-  $xfer += $val610->read($input);
-  $this->success[$key609] = $val610;
+  $key616 = '';
+  $val617 = new \metastore\Type();
+  $xfer += $input->readString($key616);
+  $val617 = new \metastore\Type();
+  $xfer += $val617->read($input);
+  $this->success[$key616] = $val617;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -12301,10 +12301,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
 $output->writeMapBegin(TType::STRING, TType::STRUCT, 
count($this->success));
 {
-  foreach ($this->success as $kiter611 => $viter612)
+  foreach ($this->success as $kiter618 => $viter619)
   {
-$xfer += $output->writeString($kiter611);
-$xfer += $viter612->write($output);
+$xfer += $output->writeString($kiter618);
+$xfer += $viter619->write($output);
  

[4/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index d41a99c..b7b7da7 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -29414,13 +29414,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list666 = 
iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list666.size);
-  String _elem667;
-  for (int _i668 = 0; _i668 < _list666.size; ++_i668)
+  org.apache.thrift.protocol.TList _list674 = 
iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list674.size);
+  String _elem675;
+  for (int _i676 = 0; _i676 < _list674.size; ++_i676)
   {
-_elem667 = iprot.readString();
-struct.success.add(_elem667);
+_elem675 = iprot.readString();
+struct.success.add(_elem675);
   }
   iprot.readListEnd();
 }
@@ -29455,9 +29455,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for (String _iter669 : struct.success)
+for (String _iter677 : struct.success)
 {
-  oprot.writeString(_iter669);
+  oprot.writeString(_iter677);
 }
 oprot.writeListEnd();
   }
@@ -29496,9 +29496,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter670 : struct.success)
+for (String _iter678 : struct.success)
 {
-  oprot.writeString(_iter670);
+  oprot.writeString(_iter678);
 }
   }
 }
@@ -29513,13 +29513,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list671 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
-struct.success = new ArrayList<String>(_list671.size);
-String _elem672;
-for (int _i673 = 0; _i673 < _list671.size; ++_i673)
+org.apache.thrift.protocol.TList _list679 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
+struct.success = new ArrayList<String>(_list679.size);
+String _elem680;
+for (int _i681 = 0; _i681 < _list679.size; ++_i681)
 {
-  _elem672 = iprot.readString();
-  struct.success.add(_elem672);
+  _elem680 = iprot.readString();
+  struct.success.add(_elem680);
 }
   }
   struct.setSuccessIsSet(true);
@@ -30173,13 +30173,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list674 = 
iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list674.size);
-  String _elem675;
-  for (int _i676 = 0; _i676 < _list674.size; ++_i676)
+  org.apache.thrift.protocol.TList _list682 = 
iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list682.size);
+  String _elem683;
+  for (int _i684 = 0; _i684 < _list682.size; ++_i684)
   {
-_elem675 = iprot.readString();
-struct.success.add(_elem675);
+_elem683 = iprot.readString();
+struct.success.add(_elem683);
   }
   iprot.readListEnd();
 }
@@ -30214,9 +30214,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for 

[5/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
index 5cf880a..7aebede 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
@@ -351,13 +351,13 @@ public class GetFileMetadataRequest implements 
org.apache.thrift.TBase {
-org.apache.thrift.protocol.TList _list602 = 
iprot.readListBegin();
-struct.fileIds = new ArrayList<Long>(_list602.size);
-long _elem603;
-for (int _i604 = 0; _i604 < _list602.size; ++_i604)
+org.apache.thrift.protocol.TList _list610 = 
iprot.readListBegin();
+struct.fileIds = new ArrayList<Long>(_list610.size);
+long _elem611;
+for (int _i612 = 0; _i612 < _list610.size; ++_i612)
 {
-  _elem603 = iprot.readI64();
-  struct.fileIds.add(_elem603);
+  _elem611 = iprot.readI64();
+  struct.fileIds.add(_elem611);
 }
 iprot.readListEnd();
   }
@@ -383,9 +383,9 @@ public class GetFileMetadataRequest implements 
org.apache.thrift.TBase {
-org.apache.thrift.protocol.TList _list607 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
-struct.fileIds = new ArrayList<Long>(_list607.size);
-long _elem608;
-for (int _i609 = 0; _i609 < _list607.size; ++_i609)
+org.apache.thrift.protocol.TList _list615 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
+struct.fileIds = new ArrayList<Long>(_list615.size);
+long _elem616;
+for (int _i617 = 0; _i617 < _list615.size; ++_i617)
 {
-  _elem608 = iprot.readI64();
-  struct.fileIds.add(_elem608);
+  _elem616 = iprot.readI64();
+  struct.fileIds.add(_elem616);
 }
   }
   struct.setFileIdsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
index 8870c2f..fe83a6e 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
@@ -433,15 +433,15 @@ public class GetFileMetadataResult implements 
org.apache.thrift.TBase {
-org.apache.thrift.protocol.TMap _map592 = iprot.readMapBegin();
-struct.metadata = new HashMap<Long,ByteBuffer>(2*_map592.size);
-long _key593;
-ByteBuffer _val594;
-for (int _i595 = 0; _i595 < _map592.size; ++_i595)
+org.apache.thrift.protocol.TMap _map600 = iprot.readMapBegin();
+struct.metadata = new HashMap<Long,ByteBuffer>(2*_map600.size);
+long _key601;
+ByteBuffer _val602;
+for (int _i603 = 0; _i603 < _map600.size; ++_i603)
 {
-  _key593 = iprot.readI64();
-  _val594 = iprot.readBinary();
-  struct.metadata.put(_key593, _val594);
+  _key601 = iprot.readI64();
+  _val602 = iprot.readBinary();
+  struct.metadata.put(_key601, _val602);
 }
 iprot.readMapEnd();
   }
@@ -475,10 +475,10 @@ public class GetFileMetadataResult implements 
org.apache.thrift.TBase {
-  for (Map.Entry<Long, ByteBuffer> _iter596 : 
struct.metadata.entrySet())
+  for (Map.Entry<Long, ByteBuffer> _iter604 : 
struct.metadata.entrySet())
   {
-oprot.writeI64(_iter596.getKey());
-oprot.writeBinary(_iter596.getValue());
+oprot.writeI64(_iter604.getKey());
+oprot.writeBinary(_iter604.getValue());
   }
   oprot.writeMapEnd();
 }
@@ -506,10 +506,10 @@ public class GetFileMetadataResult implements 
org.apache.thrift.TBase {
-for (Map.Entry<Long, ByteBuffer> _iter597 : struct.metadata.entrySet())
+for (Map.Entry<Long, ByteBuffer> _iter605 : struct.metadata.entrySet())
 {
-  oprot.writeI64(_iter597.getKey());
-  oprot.writeBinary(_iter597.getValue());
+  oprot.writeI64(_iter605.getKey());
+  oprot.writeBinary(_iter605.getValue());
 }
   }
   oprot.writeBool(struct.isSupported);
@@ -519,15 +519,15 @@ public class GetFileMetadataResult implements 
org.apache.thrift.TBase {
-struct.metadata = new HashMap<Long,ByteBuffer>(2*_map598.size);
-long _key599;
-ByteBuffer _val600;
-for (int _i601 = 0; _i601 < _map598.size; ++_i601)

[2/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 595c448..103cd86 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -15987,6 +15987,10 @@ class InsertEventRequestData {
* @var string[]
*/
   public $filesAdded = null;
+  /**
+   * @var string[]
+   */
+  public $filesAddedChecksum = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -15999,12 +16003,23 @@ class InsertEventRequestData {
 'type' => TType::STRING,
 ),
   ),
+2 => array(
+  'var' => 'filesAddedChecksum',
+  'type' => TType::LST,
+  'etype' => TType::STRING,
+  'elem' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
   if (isset($vals['filesAdded'])) {
 $this->filesAdded = $vals['filesAdded'];
   }
+  if (isset($vals['filesAddedChecksum'])) {
+$this->filesAddedChecksum = $vals['filesAddedChecksum'];
+  }
 }
   }
 
@@ -16044,6 +16059,23 @@ class InsertEventRequestData {
 $xfer += $input->skip($ftype);
   }
   break;
+case 2:
+  if ($ftype == TType::LST) {
+$this->filesAddedChecksum = array();
+$_size501 = 0;
+$_etype504 = 0;
+$xfer += $input->readListBegin($_etype504, $_size501);
+for ($_i505 = 0; $_i505 < $_size501; ++$_i505)
+{
+  $elem506 = null;
+  $xfer += $input->readString($elem506);
+  $this->filesAddedChecksum []= $elem506;
+}
+$xfer += $input->readListEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -16065,9 +16097,26 @@ class InsertEventRequestData {
   {
 $output->writeListBegin(TType::STRING, count($this->filesAdded));
 {
-  foreach ($this->filesAdded as $iter501)
+  foreach ($this->filesAdded as $iter507)
+  {
+$xfer += $output->writeString($iter507);
+  }
+}
+$output->writeListEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
+if ($this->filesAddedChecksum !== null) {
+  if (!is_array($this->filesAddedChecksum)) {
+throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('filesAddedChecksum', TType::LST, 2);
+  {
+$output->writeListBegin(TType::STRING, 
count($this->filesAddedChecksum));
+{
+  foreach ($this->filesAddedChecksum as $iter508)
   {
-$xfer += $output->writeString($iter501);
+$xfer += $output->writeString($iter508);
   }
 }
 $output->writeListEnd();
@@ -16285,14 +16334,14 @@ class FireEventRequest {
 case 5:
   if ($ftype == TType::LST) {
 $this->partitionVals = array();
-$_size502 = 0;
-$_etype505 = 0;
-$xfer += $input->readListBegin($_etype505, $_size502);
-for ($_i506 = 0; $_i506 < $_size502; ++$_i506)
+$_size509 = 0;
+$_etype512 = 0;
+$xfer += $input->readListBegin($_etype512, $_size509);
+for ($_i513 = 0; $_i513 < $_size509; ++$_i513)
 {
-  $elem507 = null;
-  $xfer += $input->readString($elem507);
-  $this->partitionVals []= $elem507;
+  $elem514 = null;
+  $xfer += $input->readString($elem514);
+  $this->partitionVals []= $elem514;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -16343,9 +16392,9 @@ class FireEventRequest {
   {
 $output->writeListBegin(TType::STRING, count($this->partitionVals));
 {
-  foreach ($this->partitionVals as $iter508)
+  foreach ($this->partitionVals as $iter515)
   {
-$xfer += $output->writeString($iter508);
+$xfer += $output->writeString($iter515);
   }
 }
 $output->writeListEnd();
@@ -16573,18 +16622,18 @@ class GetFileMetadataByExprResult {
 case 1:
   if ($ftype == TType::MAP) {
 $this->metadata = array();
-$_size509 = 0;
-$_ktype510 = 0;
-$_vtype511 = 0;
-$xfer += $input->readMapBegin($_ktype510, $_vtype511, $_size509);
-for ($_i513 = 0; $_i513 < $_size509; ++$_i513)
+$_size516 = 0;
+$_ktype517 = 0;
+   

[7/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 481a2c0..42de24e 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size812;
-::apache::thrift::protocol::TType _etype815;
-xfer += iprot->readListBegin(_etype815, _size812);
-this->success.resize(_size812);
-uint32_t _i816;
-for (_i816 = 0; _i816 < _size812; ++_i816)
+uint32_t _size818;
+::apache::thrift::protocol::TType _etype821;
+xfer += iprot->readListBegin(_etype821, _size818);
+this->success.resize(_size818);
+uint32_t _i822;
+for (_i822 = 0; _i822 < _size818; ++_i822)
 {
-  xfer += iprot->readString(this->success[_i816]);
+  xfer += iprot->readString(this->success[_i822]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter817;
-  for (_iter817 = this->success.begin(); _iter817 != this->success.end(); 
++_iter817)
+  std::vector<std::string> ::const_iterator _iter823;
+  for (_iter823 = this->success.begin(); _iter823 != this->success.end(); 
++_iter823)
   {
-xfer += oprot->writeString((*_iter817));
+xfer += oprot->writeString((*_iter823));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size818;
-::apache::thrift::protocol::TType _etype821;
-xfer += iprot->readListBegin(_etype821, _size818);
-(*(this->success)).resize(_size818);
-uint32_t _i822;
-for (_i822 = 0; _i822 < _size818; ++_i822)
+uint32_t _size824;
+::apache::thrift::protocol::TType _etype827;
+xfer += iprot->readListBegin(_etype827, _size824);
+(*(this->success)).resize(_size824);
+uint32_t _i828;
+for (_i828 = 0; _i828 < _size824; ++_i828)
 {
-  xfer += iprot->readString((*(this->success))[_i822]);
+  xfer += iprot->readString((*(this->success))[_i828]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size823;
-::apache::thrift::protocol::TType _etype826;
-xfer += iprot->readListBegin(_etype826, _size823);
-this->success.resize(_size823);
-uint32_t _i827;
-for (_i827 = 0; _i827 < _size823; ++_i827)
+uint32_t _size829;
+::apache::thrift::protocol::TType _etype832;
+xfer += iprot->readListBegin(_etype832, _size829);
+this->success.resize(_size829);
+uint32_t _i833;
+for (_i833 = 0; _i833 < _size829; ++_i833)
 {
-  xfer += iprot->readString(this->success[_i827]);
+  xfer += iprot->readString(this->success[_i833]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter828;
-  for (_iter828 = this->success.begin(); _iter828 != this->success.end(); 
++_iter828)
+  std::vector<std::string> ::const_iterator _iter834;
+  for (_iter834 = this->success.begin(); _iter834 != this->success.end(); 
++_iter834)
   {
-xfer += oprot->writeString((*_iter828));
+xfer += oprot->writeString((*_iter834));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[6/8] hive git commit: HIVE-15294: Capture additional metadata to replicate a simple insert at destination (Vaibhav Gumashta reviewed by Thejas Nair)

2016-12-19 Thread vgumashta
http://git-wip-us.apache.org/repos/asf/hive/blob/bbd99ed6/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index a74e28b..1311b20 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -16108,6 +16108,11 @@ void InsertEventRequestData::__set_filesAdded(const 
std::vector<std::string> & val) {
   this->filesAdded = val;
 }
 
+void InsertEventRequestData::__set_filesAddedChecksum(const 
std::vector<std::string> & val) {
+  this->filesAddedChecksum = val;
+__isset.filesAddedChecksum = true;
+}
+
 uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* 
iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -16150,6 +16155,26 @@ uint32_t 
InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr
   xfer += iprot->skip(ftype);
 }
 break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_LIST) {
+  {
+this->filesAddedChecksum.clear();
+uint32_t _size656;
+::apache::thrift::protocol::TType _etype659;
+xfer += iprot->readListBegin(_etype659, _size656);
+this->filesAddedChecksum.resize(_size656);
+uint32_t _i660;
+for (_i660 = 0; _i660 < _size656; ++_i660)
+{
+  xfer += iprot->readBinary(this->filesAddedChecksum[_i660]);
+}
+xfer += iprot->readListEnd();
+  }
+  this->__isset.filesAddedChecksum = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -16172,15 +16197,28 @@ uint32_t 
InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("filesAdded", 
::apache::thrift::protocol::T_LIST, 1);
   {
 xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->filesAdded.size()));
-std::vector<std::string> ::const_iterator _iter656;
-for (_iter656 = this->filesAdded.begin(); _iter656 != 
this->filesAdded.end(); ++_iter656)
+std::vector<std::string> ::const_iterator _iter661;
+for (_iter661 = this->filesAdded.begin(); _iter661 != 
this->filesAdded.end(); ++_iter661)
 {
-  xfer += oprot->writeString((*_iter656));
+  xfer += oprot->writeString((*_iter661));
 }
 xfer += oprot->writeListEnd();
   }
   xfer += oprot->writeFieldEnd();
 
+  if (this->__isset.filesAddedChecksum) {
+xfer += oprot->writeFieldBegin("filesAddedChecksum", 
::apache::thrift::protocol::T_LIST, 2);
+{
+  xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->filesAddedChecksum.size()));
+  std::vector<std::string> ::const_iterator _iter662;
+  for (_iter662 = this->filesAddedChecksum.begin(); _iter662 != 
this->filesAddedChecksum.end(); ++_iter662)
+  {
+xfer += oprot->writeBinary((*_iter662));
+  }
+  xfer += oprot->writeListEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -16189,19 +16227,26 @@ uint32_t 
InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op
void swap(InsertEventRequestData &a, InsertEventRequestData &b) {
   using ::std::swap;
   swap(a.filesAdded, b.filesAdded);
+  swap(a.filesAddedChecksum, b.filesAddedChecksum);
+  swap(a.__isset, b.__isset);
 }
 
-InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& 
other657) {
-  filesAdded = other657.filesAdded;
+InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& 
other663) {
+  filesAdded = other663.filesAdded;
+  filesAddedChecksum = other663.filesAddedChecksum;
+  __isset = other663.__isset;
 }
-InsertEventRequestData& InsertEventRequestData::operator=(const 
InsertEventRequestData& other658) {
-  filesAdded = other658.filesAdded;
+InsertEventRequestData& InsertEventRequestData::operator=(const 
InsertEventRequestData& other664) {
+  filesAdded = other664.filesAdded;
+  filesAddedChecksum = other664.filesAddedChecksum;
+  __isset = other664.__isset;
   return *this;
 }
 void InsertEventRequestData::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
   out << "InsertEventRequestData(";
   out << "filesAdded=" << to_string(filesAdded);
out << ", " << "filesAddedChecksum="; (__isset.filesAddedChecksum ? (out << 
to_string(filesAddedChecksum)) : (out << "<null>"));
   out << ")";
 }
 
@@ -16275,13 +16320,13 @@ void swap(FireEventRequestData &a, 
FireEventRequestData &b) {
   swap(a.__isset, b.__isset);
 }
 
-FireEventRequestData::FireEventRequestData(const FireEventRequestData& 
other659) {
-  insertData = other659.insertData;
-  __isset = 

hive git commit: HIVE-15333: Add a FetchTask to REPL DUMP plan for reading dump uri, last repl id as ResultSet (Vaibhav Gumashta reviewed by Sushanth Sowmyan, Thejas Nair)

2016-12-09 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 0ed01fdf8 -> 24f48f124


HIVE-15333: Add a FetchTask to REPL DUMP plan for reading dump uri, last repl 
id as ResultSet (Vaibhav Gumashta reviewed by Sushanth Sowmyan, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/24f48f12
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/24f48f12
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/24f48f12

Branch: refs/heads/master
Commit: 24f48f12431dfa647f48ba9311676265c71c941f
Parents: 0ed01fd
Author: Vaibhav Gumashta 
Authored: Fri Dec 9 00:29:13 2016 -0800
Committer: Vaibhav Gumashta 
Committed: Fri Dec 9 00:29:13 2016 -0800

--
 .../hive/ql/TestReplicationScenarios.java   |  3 +-
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   | 35 +
 .../hive/ql/parse/BaseSemanticAnalyzer.java | 35 -
 .../hive/ql/parse/DDLSemanticAnalyzer.java  | 28 ---
 .../apache/hadoop/hive/ql/parse/EximUtil.java   | 80 ++--
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 27 +++
 .../clientnegative/authorization_import.q.out   |  2 +-
 .../exim_00_unsupported_schema.q.out|  2 +-
 .../apache/hadoop/fs/ProxyLocalFileSystem.java  | 19 +++--
 9 files changed, 135 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/24f48f12/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index 95db9e8..9b7014b 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -286,7 +286,8 @@ public class TestReplicationScenarios {
 throw new RuntimeException(e);
   }
 }
-return (lastResults.get(rowNum).split("\\001"))[colNum];
+// Split around the 'tab' character
+return (lastResults.get(rowNum).split("\\t"))[colNum];
   }
 
   private void verifyResults(String[] data) throws IOException {

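The delimiter fix above matters because rows fetched through the new FetchTask come back tab-separated, whereas the old code assumed the ctrl-A (\u0001) separator of raw result files. A self-contained illustration with made-up values:

    public class SplitExample {
      public static void main(String[] args) {
        String row = "hdfs://nn:8020/repl/dump1\t42"; // tab-separated FetchTask-style row
        String[] cols = row.split("\\t");
        System.out.println(cols[0]); // dump uri
        System.out.println(cols[1]); // last repl id
      }
    }
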
http://git-wip-us.apache.org/repos/asf/hive/blob/24f48f12/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index c84570b..3d4057b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -28,10 +28,12 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
+import java.net.URI;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
@@ -1286,4 +1288,37 @@ public class TestJdbcWithMiniHS2 {
 }
 assertTrue("Rows returned from describe function", numRows > 0);
   }
+
+  @Test
+  public void testReplDumpResultSet() throws Exception {
+String tid = TestJdbcWithMiniHS2.class.getCanonicalName().toLowerCase().replace('.', '_') + "_"
++ System.currentTimeMillis();
+String testPathName = System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
+Path testPath = new Path(testPathName);
+FileSystem fs = testPath.getFileSystem(new HiveConf());
+Statement stmt = conDefault.createStatement();
+try {
+  stmt.execute("set hive.repl.rootdir = " + testPathName);
+  ResultSet rs = stmt.executeQuery("repl dump " + testDbName);
+  ResultSetMetaData rsMeta = rs.getMetaData();
+  assertEquals(2, rsMeta.getColumnCount());
+  int numRows = 0;
+  while (rs.next()) {
+numRows++;
+URI uri = new URI(rs.getString(1));
+int notificationId = rs.getInt(2);
+assertNotNull(uri);
+assertEquals(testPath.toUri().getScheme(), uri.getScheme());
+assertEquals(testPath.toUri().getAuthority(), uri.getAuthority());
+// In test setup, we append '/next' to hive.repl.rootdir and use that as the dump location
+assertEquals(testPath.toUri().getPath() + "/next", uri.getPath());
+assertNotNull(notificationId);
+  }
+  assertEquals(1, numRows);
+} finally {
+  // 

hive git commit: HIVE-15332: REPL LOAD & DUMP support for incremental CREATE_TABLE/ADD_PTN (Sushanth Sowmyan reviewed by Vaibhav Gumashta)

2016-12-06 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 0c8edf053 -> 2f5889c9b


HIVE-15332: REPL LOAD & DUMP support for incremental CREATE_TABLE/ADD_PTN (Sushanth Sowmyan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2f5889c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2f5889c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2f5889c9

Branch: refs/heads/master
Commit: 2f5889c9b94977b064ba614c89684404cbb9ca63
Parents: 0c8edf0
Author: Vaibhav Gumashta 
Authored: Tue Dec 6 11:16:12 2016 -0800
Committer: Vaibhav Gumashta 
Committed: Tue Dec 6 11:16:12 2016 -0800

--
 .../listener/DbNotificationListener.java|   1 +
 itests/hive-unit/pom.xml|   5 +
 .../hive/ql/TestReplicationScenarios.java   | 129 +++-
 .../hive/metastore/messaging/EventUtils.java|  22 ++
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  16 +
 .../apache/hadoop/hive/ql/metadata/Hive.java|  28 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |  19 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 320 +++
 .../hadoop/hive/ql/parse/ReplicationSpec.java   |   8 +
 .../hadoop/hive/ql/plan/AddPartitionDesc.java   |  22 ++
 .../hadoop/hive/ql/plan/CreateTableDesc.java|  21 ++
 11 files changed, 516 insertions(+), 75 deletions(-)
--

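For orientation, the incremental cycle this patch enables looks roughly like the sketch below. run(), the directory and id variables, and the exact REPL syntax are illustrative stand-ins (ReplicationSemanticAnalyzer holds the real grammar):

    public class IncrementalReplSketch {
      static void run(String hql) { /* execute via Driver or JDBC */ }

      public static void main(String[] args) {
        run("REPL DUMP src_db");                     // bootstrap: returns dump dir + last repl id
        String bootstrapDir = "/repl/dump1";         // placeholder for the returned dir
        long lastReplId = 42;                        // placeholder for the returned id
        run("REPL LOAD dest_db FROM '" + bootstrapDir + "'");
        run("CREATE TABLE src_db.t1 (a STRING)");    // emits a CREATE_TABLE event
        run("REPL DUMP src_db FROM " + lastReplId);  // incremental: only events after lastReplId
        run("REPL LOAD dest_db FROM '/repl/dump2'"); // placeholder incremental dump dir
      }
    }
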

http://git-wip-us.apache.org/repos/asf/hive/blob/2f5889c9/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
--
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 494d01f..119801f 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -332,6 +332,7 @@ public class DbNotificationListener extends MetaStoreEventListener {
   private void enqueue(NotificationEvent event) {
 if (rs != null) {
   synchronized(NOTIFICATION_TBL_LOCK) {
+LOG.debug("DbNotif:Enqueueing : {}:{}",event.getEventId(),event.getMessage());
 rs.addNotificationEvent(event);
   }
 } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/2f5889c9/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index cd209b4..6a190d1 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -81,6 +81,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-it-util</artifactId>
       <version>${project.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/2f5889c9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index 01abe9b..95db9e8 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.util.Shell;
@@ -55,6 +56,7 @@ public class TestReplicationScenarios {
   static Driver driver;
 
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
+  private ArrayList<String> lastResults;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -69,8 +71,8 @@ public class TestReplicationScenarios {
   WindowsPathUtil.convertPathsFromWindowsToHdfs(hconf);
 }
 
-//System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
-//DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
+System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
+DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore

hive git commit: HIVE-15284: Add junit test to test replication scenarios (Sushanth Sowmyan reviewed by Vaibhav Gumashta)

2016-11-28 Thread vgumashta
Repository: hive
Updated Branches:
  refs/heads/master 1aebe9d54 -> 63bdfa687


HIVE-15284: Add junit test to test replication scenarios (Sushanth Sowmyan reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/63bdfa68
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/63bdfa68
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/63bdfa68

Branch: refs/heads/master
Commit: 63bdfa6870614b6ae930a715dd8711addd16d2b7
Parents: 1aebe9d
Author: Vaibhav Gumashta 
Authored: Mon Nov 28 12:28:09 2016 -0800
Committer: Vaibhav Gumashta 
Committed: Mon Nov 28 12:28:09 2016 -0800

--
 .../hive/ql/TestReplicationScenarios.java   | 238 +++
 .../ql/parse/ReplicationSemanticAnalyzer.java   |  47 ++--
 2 files changed, 267 insertions(+), 18 deletions(-)
--

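Note that the DbNotificationListener wiring is still commented out in this commit's setup (HIVE-15332 above turns it on); a condensed, hedged sketch of what that wiring amounts to, with a placeholder port:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class MetastoreListenerSetup {
      public static void main(String[] args) {
        // Must be set before the metastore starts; the listener class ships in
        // hive-hcatalog-server-extensions (hence the pom dependency added above).
        System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
            "org.apache.hive.hcatalog.listener.DbNotificationListener");
        HiveConf hconf = new HiveConf();
        hconf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
        hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
      }
    }
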

http://git-wip-us.apache.org/repos/asf/hive/blob/63bdfa68/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
new file mode 100644
index 0000000..01abe9b
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestReplicationScenarios {
+
+  final static String DBNOTIF_LISTENER_CLASSNAME = "org.apache.hive.hcatalog.listener.DbNotificationListener";
+  // FIXME : replace with hive copy once that is copied
+  final static String tid =
+  TestReplicationScenarios.class.getCanonicalName().replace('.','_') + "_" + System.currentTimeMillis();
+  final static String TEST_PATH = System.getProperty("test.warehouse.dir","/tmp") + Path.SEPARATOR + tid;
+
+  static HiveConf hconf;
+  static boolean useExternalMS = false;
+  static int msPort;
+  static Driver driver;
+
+  protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+hconf = new HiveConf(TestReplicationScenarios.class);
+String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTOREURIS.varname);
+if (metastoreUri != null) {
+  hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
+  useExternalMS = true;
+  return;
+}
+if (Shell.WINDOWS) {
+  WindowsPathUtil.convertPathsFromWindowsToHdfs(hconf);
+}
+
+//System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
+//DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
+msPort = MetaStoreUtils.startMetaStore();
+hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/");
+hconf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
++ msPort);
+hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
+hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+
