hive git commit: HIVE-19222 : TestNegativeCliDriver tests are failing due to "java.lang.OutOfMemoryError: GC overhead limit exceeded" (Aihua Xu via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/branch-3 cfd43a33e -> bfd1e0c4f

HIVE-19222 : TestNegativeCliDriver tests are failing due to "java.lang.OutOfMemoryError: GC overhead limit exceeded" (Aihua Xu via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bfd1e0c4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bfd1e0c4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bfd1e0c4

Branch: refs/heads/branch-3
Commit: bfd1e0c4fcff6ed2efd7d905fd026cea225657a1
Parents: cfd43a3
Author: Aihua Xu
Authored: Tue Apr 17 10:13:48 2018 -0700
Committer: Vineet Garg
Committed: Thu Apr 19 22:51:55 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/bfd1e0c4/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 190f74c..32732f9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -96,7 +96,7 @@
 1.0b3
 3.3.0-release
--Xmx1024m
+-Xmx2048m
 1.7
 2.3
 2.17
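For context on the patch above: the one-line pom.xml change raises the heap ceiling of the forked test JVM from 1024m to 2048m, which is what makes the "GC overhead limit exceeded" failures go away. A minimal, plain-JDK sketch (not part of the patch) for confirming the ceiling a forked test JVM actually received:

public class MaxHeapCheck {
  public static void main(String[] args) {
    // Runtime.maxMemory() reports the -Xmx ceiling this JVM was started with.
    long maxBytes = Runtime.getRuntime().maxMemory();
    System.out.printf("max heap = %d MB%n", maxBytes / (1024 * 1024));
  }
}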
[1/2] hive git commit: HIVE-19155 : Day time saving cause Druid inserts to fail with org.apache.hive.druid.io.druid.java.util.common.UOE: Cannot add overlapping segments (Slim Bouguerra via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/branch-3 34edad28c -> cfd43a33e

HIVE-19155 : Day time saving cause Druid inserts to fail with org.apache.hive.druid.io.druid.java.util.common.UOE: Cannot add overlapping segments (Slim Bouguerra via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a2e5d8bd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a2e5d8bd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a2e5d8bd

Branch: refs/heads/branch-3
Commit: a2e5d8bde5a570fe8220895acbbc3b855e9e6e63
Parents: 34edad2
Author: Slim Bouguerra
Authored: Fri Apr 13 17:48:38 2018 -0700
Committer: Vineet Garg
Committed: Thu Apr 19 22:38:58 2018 -0700

--
 .../hadoop/hive/druid/io/DruidRecordWriter.java |  30 +-
 .../clientpositive/druidmini_test_insert.q      |  64 +++
 .../druid/druidmini_test_insert.q.out           | 533 +++
 3 files changed, 617 insertions(+), 10 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/a2e5d8bd/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
index 7d2bb91..8ab34a8 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
@@ -29,6 +29,7 @@ import com.google.common.collect.Lists;
 import io.druid.data.input.Committer;
 import io.druid.data.input.InputRow;
 import io.druid.data.input.MapBasedInputRow;
+import io.druid.java.util.common.DateTimes;
 import io.druid.java.util.common.granularity.Granularity;
 import io.druid.segment.indexing.DataSchema;
 import io.druid.segment.indexing.RealtimeTuningConfig;
@@ -110,7 +111,7 @@ public class DruidRecordWriter implements RecordWriter

http://git-wip-us.apache.org/repos/asf/hive/blob/a2e5d8bd/ql/src/test/queries/clientpositive/druidmini_test_insert.q
--
diff --git a/ql/src/test/queries/clientpositive/druidmini_test_insert.q b/ql/src/test/queries/clientpositive/druidmini_test_insert.q
index 558e246..47199b9 100644
--- a/ql/src/test/queries/clientpositive/druidmini_test_insert.q
+++ b/ql/src/test/queries/clientpositive/druidmini_test_insert.q
@@ -51,3 +51,67 @@ SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`,
 SELECT COUNT(*) FROM druid_alltypesorc;
 DROP TABLE druid_alltypesorc;
+
+
+-- Day light saving time test insert into test
+
+create database druid_test_dst;
+use druid_test_dst;
+
+create table test_base_table(`timecolumn` timestamp, `userid` string, `num_l` float);
+insert into test_base_table values ('2015-03-08 00:00:00', 'i1-start', 4);
+insert into test_base_table values ('2015-03-08 23:59:59', 'i1-end', 1);
+insert into test_base_table values ('2015-03-09 00:00:00', 'i2-start', 4);
+insert into test_base_table values ('2015-03-09 23:59:59', 'i2-end', 1);
+insert into test_base_table values ('2015-03-10 00:00:00', 'i3-start', 2);
+insert into test_base_table values ('2015-03-10 23:59:59', 'i3-end', 2);
+
+CREATE TABLE druid_test_table
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "DAY")
+AS
+select cast(`timecolumn` as timestamp with local time zone) as `__time`, `userid`, `num_l` FROM test_base_table;
+
+select * FROM druid_test_table;
+
+select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
+select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
+
+select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
+select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
+
+select * from druid_test_table where `__time` = cast('2015-03-10 00:00:00' as timestamp with local time zone);
+select * from druid_test_table where `__time` = cast('2015-03-10 23:59:59' as timestamp with local time zone);
+
+
+explain select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
+explain select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
+
+explain select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
+explain select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
+
+expl
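Why overlapping segments appear: with "druid.segment.granularity" = "DAY", each row is bucketed into a day-long interval, but on a daylight-saving transition a local-time day is not 24 hours, so interval math done naively in local time can produce buckets that overlap, which Druid rejects with the UOE above. The new test data deliberately straddles the 2015-03-08 US spring-forward. The patch routes the interval computation through Druid's DateTimes utilities; the self-contained JDK sketch below (not the patch itself) only shows the 23-hour day that naive 24-hour arithmetic gets wrong:

import java.time.Duration;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DstDayLength {
  public static void main(String[] args) {
    ZoneId la = ZoneId.of("America/Los_Angeles");
    LocalDate springForward = LocalDate.of(2015, 3, 8); // US DST starts at 02:00 local
    ZonedDateTime dayStart = springForward.atStartOfDay(la);
    ZonedDateTime nextDayStart = springForward.plusDays(1).atStartOfDay(la);
    // Prints PT23H: this "day" is 23 hours long, so adding a fixed 24 hours to
    // its start would land inside the next day's bucket.
    System.out.println(Duration.between(dayStart, nextDayStart));
  }
}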
[2/2] hive git commit: HIVE-19164 : TestMetastoreVersion failures (Vihang Karajgaonkar via Ashutosh Chauhan)
HIVE-19164 : TestMetastoreVersion failures (Vihang Karajgaonkar via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cfd43a33
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cfd43a33
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cfd43a33

Branch: refs/heads/branch-3
Commit: cfd43a33e3e4d02dfec5d59c5f6ece68c3628d71
Parents: a2e5d8b
Author: Vihang Karajgaonkar
Authored: Tue Apr 17 09:56:15 2018 -0700
Committer: Vineet Garg
Committed: Thu Apr 19 22:47:07 2018 -0700

--
 .../hive/metastore/TestMetastoreVersion.java | 22 +---
 1 file changed, 10 insertions(+), 12 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/cfd43a33/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
index 6015405..4d26f3e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
@@ -142,6 +142,7 @@ public class TestMetastoreVersion extends TestCase {
     ObjectStore.setSchemaVerified(false);
     hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION, true);
+    hiveConf = new HiveConf(this.getClass());
     setVersion(hiveConf, metastoreSchemaInfo.getHiveSchemaVersion());
     driver = DriverFactory.newDriver(hiveConf);
     CommandProcessorResponse proc = driver.run("show tables");
@@ -191,37 +192,34 @@ public class TestMetastoreVersion extends TestCase {
   }

   // write the given version to metastore
-  private String getVersion(HiveConf conf) throws HiveMetaException {
+  private String getVersion(HiveConf conf) throws Exception {
     return getMetaStoreVersion();
   }

   // write the given version to metastore
-  private void setVersion(HiveConf conf, String version) throws HiveMetaException {
+  private void setVersion(HiveConf conf, String version) throws Exception {
     setMetaStoreVersion(version, "setVersion test");
   }

   // Load the version stored in the metastore db
-  public String getMetaStoreVersion() throws HiveMetaException {
-    ObjectStore objStore = new ObjectStore();
-    objStore.setConf(hiveConf);
+  public String getMetaStoreVersion() throws HiveMetaException, MetaException {
+    RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(hiveConf);
     try {
-      return objStore.getMetaStoreSchemaVersion();
+      return ms.getMetaStoreSchemaVersion();
     } catch (MetaException e) {
       throw new HiveMetaException("Failed to get version", e);
     }
   }

   // Store the given version and comment in the metastore
-  public void setMetaStoreVersion(String newVersion, String comment) throws HiveMetaException {
-    ObjectStore objStore = new ObjectStore();
-    objStore.setConf(hiveConf);
+  public void setMetaStoreVersion(String newVersion, String comment)
+      throws HiveMetaException, MetaException {
+    RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(hiveConf);
     try {
-      objStore.setMetaStoreSchemaVersion(newVersion, comment);
+      ms.setMetaStoreSchemaVersion(newVersion, comment);
     } catch (MetaException e) {
       throw new HiveMetaException("Failed to set version", e);
     }
   }
-
-
 }
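The root of the flakiness fixed above: each "new ObjectStore()" bound a fresh store to whatever hiveConf held at that moment, so the test's schema-version reads and writes could see different state than the running metastore, while HMSHandler.getMSForConf(hiveConf) returns the RawStore already associated with that configuration. A toy model of that shared-lookup idea (illustrative names only, not Hive's API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class StorePerConf {
  // Stand-in for Hive's RawStore: just enough surface for the demo.
  interface Store { String getSchemaVersion(); }

  private static final Map<String, Store> STORES = new ConcurrentHashMap<>();

  // Analogue of getMSForConf(conf): one shared store per configuration,
  // rather than a fresh instance per caller.
  static Store getMSForConf(String confKey) {
    return STORES.computeIfAbsent(confKey, k -> () -> "3.1.0");
  }

  public static void main(String[] args) {
    // Both lookups observe the same instance, so a version written through
    // one is the version read through the other.
    System.out.println(getMSForConf("metastore") == getMSForConf("metastore")); // true
  }
}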
[1/2] hive git commit: HIVE-18410 : [Performance][Avro] Reading flat Avro tables is very expensive in Hive (Ratandeep Ratti via Anthony Hsu, Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/branch-3 a3e535f94 -> 34edad28c

HIVE-18410 : [Performance][Avro] Reading flat Avro tables is very expensive in Hive (Ratandeep Ratti via Anthony Hsu, Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8bfea2d0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8bfea2d0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8bfea2d0

Branch: refs/heads/branch-3
Commit: 8bfea2d09533bcf46290af140b2c44a420758987
Parents: a3e535f
Author: Ratandeep Ratti
Authored: Mon Jan 8 16:47:00 2018 -0800
Committer: Vineet Garg
Committed: Thu Apr 19 22:32:39 2018 -0700

--
 .../hive/serde2/avro/AvroDeserializer.java      | 77 +++-
 .../hadoop/hive/serde2/avro/AvroSerDe.java      | 25 ++-
 .../hadoop/hive/serde2/avro/AvroSerdeUtils.java | 23 --
 .../hive/serde2/avro/TestAvroDeserializer.java  | 67 +
 .../avro/TestAvroObjectInspectorGenerator.java  | 11 +++
 5 files changed, 114 insertions(+), 89 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/8bfea2d0/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
--
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
index b7b3d12..34da50d 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
@@ -198,11 +198,18 @@ class AvroDeserializer {
   private Object worker(Object datum, Schema fileSchema, Schema recordSchema, TypeInfo columnType)
       throws AvroSerdeException {
-    // Klaxon! Klaxon! Klaxon!
-    // Avro requires NULLable types to be defined as unions of some type T
-    // and NULL. This is annoying and we're going to hide it from the user.
+    if (datum == null) {
+      return null;
+    }
+
+    // Avro requires nullable types to be defined as unions of some type T
+    // and NULL. This is annoying and we're going to hide it from the user.
+
     if (AvroSerdeUtils.isNullableType(recordSchema)) {
-      return deserializeNullableUnion(datum, fileSchema, recordSchema, columnType);
+      recordSchema = AvroSerdeUtils.getOtherTypeFromNullableType(recordSchema);
+    }
+    if (fileSchema != null && AvroSerdeUtils.isNullableType(fileSchema)) {
+      fileSchema = AvroSerdeUtils.getOtherTypeFromNullableType(fileSchema);
     }

     switch(columnType.getCategory()) {
@@ -300,68 +307,6 @@ class AvroDeserializer {
     }
   }

-  /**
-   * Extract either a null or the correct type from a Nullable type.
-   */
-  private Object deserializeNullableUnion(Object datum, Schema fileSchema, Schema recordSchema, TypeInfo columnType)
-      throws AvroSerdeException {
-    if (recordSchema.getTypes().size() == 2) {
-      // A type like [NULL, T]
-      return deserializeSingleItemNullableUnion(datum, fileSchema, recordSchema, columnType);
-    } else {
-      // Types like [NULL, T1, T2, ...]
-      if (datum == null) {
-        return null;
-      } else {
-        Schema newRecordSchema = AvroSerdeUtils.getOtherTypeFromNullableType(recordSchema);
-        return worker(datum, fileSchema, newRecordSchema, columnType);
-      }
-    }
-  }
-
-  private Object deserializeSingleItemNullableUnion(Object datum,
-      Schema fileSchema,
-      Schema recordSchema,
-      TypeInfo columnType)
-      throws AvroSerdeException {
-    int tag = GenericData.get().resolveUnion(recordSchema, datum); // Determine index of value
-    Schema schema = recordSchema.getTypes().get(tag);
-    if (schema.getType().equals(Type.NULL)) {
-      return null;
-    }
-
-    Schema currentFileSchema = null;
-    if (fileSchema != null) {
-      if (fileSchema.getType() == Type.UNION) {
-        // The fileSchema may have the null value in a different position, so
-        // we need to get the correct tag
-        try {
-          tag = GenericData.get().resolveUnion(fileSchema, datum);
-          currentFileSchema = fileSchema.getTypes().get(tag);
-        } catch (UnresolvedUnionException e) {
-          if (LOG.isDebugEnabled()) {
-            String datumClazz = null;
-            if (datum != null) {
-              datumClazz = datum.getClass().getName();
-            }
-            String msg = "File schema union could not resolve union. fileSchema = " + fileSchema +
-                ", recordSchema = " + recordSchema + ", datum class = " + datumClaz
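The patch above replaces the recursive deserializeNullableUnion()/deserializeSingleItemNullableUnion() pair with a single unwrapping step at the top of worker(): once a null datum is handled, a nullable union [NULL, T] can be treated as plain T. A hedged, self-contained sketch of that unwrapping against the real Avro Schema API (the helper names mirror AvroSerdeUtils, but this is not the Hive code):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.avro.Schema;

public class NullableUnions {

  // Mirrors the idea of AvroSerdeUtils.isNullableType: a union containing NULL.
  static boolean isNullableType(Schema schema) {
    return schema.getType() == Schema.Type.UNION
        && schema.getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.NULL);
  }

  // Mirrors the idea of AvroSerdeUtils.getOtherTypeFromNullableType: peel off NULL.
  static Schema getOtherTypeFromNullableType(Schema union) {
    List<Schema> branches = union.getTypes();
    if (branches.size() == 2) {
      // The common [NULL, T] case collapses to plain T.
      return branches.get(0).getType() == Schema.Type.NULL ? branches.get(1) : branches.get(0);
    }
    // [NULL, T1, T2, ...] collapses to the union [T1, T2, ...].
    return Schema.createUnion(branches.stream()
        .filter(s -> s.getType() != Schema.Type.NULL)
        .collect(Collectors.toList()));
  }

  public static void main(String[] args) {
    Schema nullableLong = Schema.createUnion(Arrays.asList(
        Schema.create(Schema.Type.NULL), Schema.create(Schema.Type.LONG)));
    System.out.println(isNullableType(nullableLong));               // true
    System.out.println(getOtherTypeFromNullableType(nullableLong)); // "long"
  }
}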
[2/2] hive git commit: HIVE-18816 : CREATE TABLE (ACID) doesn't work with TIMESTAMPLOCALTZ column type (Igor Kryvenko via Ashutosh Chauhan)
HIVE-18816 : CREATE TABLE (ACID) doesn't work with TIMESTAMPLOCALTZ column type (Igor Kryvenko via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/34edad28
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/34edad28
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/34edad28

Branch: refs/heads/branch-3
Commit: 34edad28c84cd84f6c34e34e00c985100307ce57
Parents: 8bfea2d
Author: Igor Kryvenko
Authored: Sat Apr 14 10:11:14 2018 -0700
Committer: Vineet Garg
Committed: Thu Apr 19 22:35:29 2018 -0700

--
 .../java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java |  2 ++
 .../queries/clientpositive/orc_timestamplocaltz_type.q   |  5 +
 .../clientpositive/orc_timestamplocaltz_type.q.out       | 10 ++
 3 files changed, 17 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/34edad28/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
index 9e476fa..c81bcfe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
@@ -542,6 +542,8 @@ final public class OrcStruct implements Writable {
       case DECIMAL:
         return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
             (PrimitiveTypeInfo)info);
+      case TIMESTAMPLOCALTZ:
+        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector((PrimitiveTypeInfo) info);
       default:
         throw new IllegalArgumentException("Unknown primitive type " +
             ((PrimitiveTypeInfo) info).getPrimitiveCategory());

http://git-wip-us.apache.org/repos/asf/hive/blob/34edad28/ql/src/test/queries/clientpositive/orc_timestamplocaltz_type.q
--
diff --git a/ql/src/test/queries/clientpositive/orc_timestamplocaltz_type.q b/ql/src/test/queries/clientpositive/orc_timestamplocaltz_type.q
new file mode 100644
index 000..fbe702e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_timestamplocaltz_type.q
@@ -0,0 +1,5 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+CREATE TABLE table_acid(d int, tz timestamp with local time zone)
+clustered by (d) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/34edad28/ql/src/test/results/clientpositive/orc_timestamplocaltz_type.q.out
--
diff --git a/ql/src/test/results/clientpositive/orc_timestamplocaltz_type.q.out b/ql/src/test/results/clientpositive/orc_timestamplocaltz_type.q.out
new file mode 100644
index 000..fe3bb5c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_timestamplocaltz_type.q.out
@@ -0,0 +1,10 @@
+PREHOOK: query: CREATE TABLE table_acid(d int, tz timestamp with local time zone)
+clustered by (d) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table_acid
+POSTHOOK: query: CREATE TABLE table_acid(d int, tz timestamp with local time zone)
+clustered by (d) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table_acid
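Mechanically, the fix above is a one-case addition: the primitive-category switch in OrcStruct maps each category to an ObjectInspector, and any category without an explicit case falls into the default that throws "Unknown primitive type". A tiny stand-alone model of that dispatch pattern (illustrative types, not Hive's):

public class InspectorDispatch {
  // Reduced stand-in for Hive's PrimitiveCategory.
  enum PrimitiveCategory { INT, STRING, DECIMAL, TIMESTAMPLOCALTZ }

  static String inspectorFor(PrimitiveCategory category) {
    switch (category) {
      case INT:
      case STRING:
      case DECIMAL:
      case TIMESTAMPLOCALTZ: // the newly handled case; it previously hit the default
        return "writable inspector for " + category;
      default:
        throw new IllegalArgumentException("Unknown primitive type " + category);
    }
  }

  public static void main(String[] args) {
    // Before the patch, the TIMESTAMPLOCALTZ equivalent threw; now it resolves.
    System.out.println(inspectorFor(PrimitiveCategory.TIMESTAMPLOCALTZ));
  }
}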
hive git commit: HIVE-19219: Incremental REPL DUMP should throw error if requested events are cleaned-up (Sankar Hariappan, reviewed by Mahesh Kumar Behera, Thejas M Nair)
Repository: hive
Updated Branches:
  refs/heads/master 92b9ba7d3 -> 6c4adc9fc

HIVE-19219: Incremental REPL DUMP should throw error if requested events are cleaned-up (Sankar Hariappan, reviewed by Mahesh Kumar Behera, Thejas M Nair)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6c4adc9f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6c4adc9f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6c4adc9f

Branch: refs/heads/master
Commit: 6c4adc9fc509d53a650e41841cfb6c37352384f5
Parents: 92b9ba7
Author: Sankar Hariappan
Authored: Fri Apr 20 08:17:08 2018 +0530
Committer: Sankar Hariappan
Committed: Fri Apr 20 08:17:08 2018 +0530

--
 .../hive/ql/parse/TestReplicationScenarios.java | 131 ++-
 .../hive/metastore/HiveMetaStoreClient.java     |  26 ++--
 .../HiveMetaStoreClientPreCatalog.java          |  26 ++--
 3 files changed, 102 insertions(+), 81 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/6c4adc9f/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index b59833d..8b33b78 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFil
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork;
+import org.apache.hadoop.hive.ql.parse.repl.load.EventDumpDirComparator;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
@@ -764,11 +765,65 @@ public class TestReplicationScenarios {
     run("TRUNCATE TABLE " + dbName + ".unptned", driver);
     run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver);

-    // Inject a behaviour where all events will get ID less than 100 except TRUNCATE which will get ID 100.
-    // This enesures variable length of event ID in the incremental dump
-    BehaviourInjection eventIdModifier
+    Tuple incrementalDump = replDumpDb(dbName, replDumpId, null, null);
+    String incrementalDumpLocn = incrementalDump.dumpLocation;
+    replDumpId = incrementalDump.lastReplId;
+
+    // Rename the event directories such a way that the length varies.
+    // We will encounter create_table, truncate followed by insert.
+    // For the insert, set the event ID longer such that old comparator picks insert before truncate
+    // Eg: Event IDs CREATE_TABLE - 5, TRUNCATE - 9, INSERT - 12 changed to
+    //     CREATE_TABLE - 5, TRUNCATE - 9, INSERT - 100
+    // But if TRUNCATE have ID-10, then having INSERT-100 won't be sufficient to test the scenario.
+    // So, we set any event comes after CREATE_TABLE starts with 20.
+    // Eg: Event IDs CREATE_TABLE - 5, TRUNCATE - 10, INSERT - 12 changed to
+    //     CREATE_TABLE - 5, TRUNCATE - 20(20 <= Id < 100), INSERT - 100
+    Path dumpPath = new Path(incrementalDumpLocn);
+    FileSystem fs = dumpPath.getFileSystem(hconf);
+    FileStatus[] dirsInLoadPath = fs.listStatus(dumpPath, EximUtil.getDirectoryFilter(fs));
+    Arrays.sort(dirsInLoadPath, new EventDumpDirComparator());
+    long nextEventId = 0;
+    for (FileStatus dir : dirsInLoadPath) {
+      Path srcPath = dir.getPath();
+      if (nextEventId == 0) {
+        nextEventId = (long) Math.pow(10.0, (double) srcPath.getName().length()) * 2;
+        continue;
+      }
+      Path destPath = new Path(srcPath.getParent(), String.valueOf(nextEventId));
+      fs.rename(srcPath, destPath);
+      LOG.info("Renamed eventDir {} to {}", srcPath.getName(), destPath.getName());
+      // Once the eventId reaches 5-20-100, then just increment it sequentially. This is to avoid longer values.
+      if (String.valueOf(nextEventId).length() - srcPath.getName().length() >= 2) {
+        nextEventId++;
+      } else {
+        nextEventId = (long) Math.pow(10.0, (double) String.valueOf(nextEventId).length());
+      }
+    }
+
+    // Load from modified dump event directories.
+    run("REPL LOAD " + replDbName + " FROM '" + incrementalDumpLocn + "'", driverMirror);
+    verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", unptn_data, driverMirror);
+  }

+  @Test
+  public void testIncrementalReplWithEventsMissing() throws IOExce
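The scenario the directory renaming above sets up: incremental dump event directories are named by numeric event ID, and once IDs have different digit counts, plain string ordering replays events out of order ("100" sorts before "20" and "5"). EventDumpDirComparator orders them numerically. A minimal JDK illustration of the difference:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class EventDirOrdering {
  public static void main(String[] args) {
    List<String> eventDirs = new ArrayList<>(Arrays.asList("5", "20", "100"));

    Collections.sort(eventDirs);
    System.out.println(eventDirs); // [100, 20, 5] -- lexicographic order replays events backwards

    eventDirs.sort(Comparator.comparingLong(Long::parseLong));
    System.out.println(eventDirs); // [5, 20, 100] -- numeric order, what EventDumpDirComparator provides
  }
}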
[2/2] hive git commit: HIVE-18739 - Add support for Import/Export from Acid table (Eugene Koifman, reviewed by Sergey Shelukhin)
HIVE-18739 - Add support for Import/Export from Acid table (Eugene Koifman, reviewed by Sergey Shelukhin)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a3e535f9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a3e535f9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a3e535f9

Branch: refs/heads/branch-3
Commit: a3e535f944d852209ca299e703860780fbd53955
Parents: 8584947
Author: Eugene Koifman
Authored: Thu Apr 19 09:21:41 2018 -0700
Committer: Eugene Koifman
Committed: Thu Apr 19 09:21:41 2018 -0700

--
 .../apache/hadoop/hive/common/JavaUtils.java    |   3 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   2 +-
 .../apache/hadoop/hive/ql/exec/ExportTask.java  |   1 +
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |   4 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   9 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java | 217
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |   7 +
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |  19 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   | 116 ++--
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |   3 +
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  | 258 -
 .../apache/hadoop/hive/ql/plan/CopyWork.java    |   6 +-
 .../apache/hadoop/hive/ql/plan/ExportWork.java  |  28 +-
 .../hadoop/hive/ql/plan/ImportTableDesc.java    |   2 +-
 .../hadoop/hive/ql/session/SessionState.java    |  10 +-
 .../hadoop/hive/ql/TestTxnAddPartition.java     |   2 +-
 .../org/apache/hadoop/hive/ql/TestTxnExIm.java  | 538 +++
 .../apache/hadoop/hive/ql/TestTxnLoadData.java  |   2 +-
 .../apache/hadoop/hive/ql/TestTxnNoBuckets.java |  24 +-
 .../hive/metastore/HiveMetaStoreClient.java     |   2 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  18 +-
 .../hive/metastore/utils/MetaStoreUtils.java    |  20 +
 22 files changed, 1201 insertions(+), 90 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e535f9/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
index 75c07b4..7894ec1 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JavaUtils.java
@@ -228,10 +228,11 @@ public final class JavaUtils {
     @Override
     public boolean accept(Path path) {
       String name = path.getName();
+      //todo: what if this is a base?
       if (!name.startsWith(DELTA_PREFIX + "_")) return false;
       String idStr = name.substring(DELTA_PREFIX.length() + 1, DELTA_PREFIX.length() + 1 + DELTA_DIGITS_LEN);
       try {
-        Long.parseLong(idStr);
+        Long.parseLong(idStr);//what for? sanity check?
       } catch (NumberFormatException ex) {
         return false;
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e535f9/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 5b26b84..9a487cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3965,7 +3965,7 @@ public class DDLTask extends Task implements Serializable {
    * how this is desirable.
    *
    * As of HIVE-14993, WriteEntity with different WriteType must be considered different.
-   * So WriteEntity create in DDLTask cause extra output in golden files, but only because
+   * So WriteEntity created in DDLTask cause extra output in golden files, but only because
    * DDLTask sets a different WriteType for the same Entity.
    *
    * In the spirit of bug-for-bug compatibility, this method ensures we only add new

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e535f9/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
index 91af814..aba6591 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java
@@ -52,6 +52,7 @@ public class ExportTask extends Task implements Serializable {
         conf, false);
     Hive db = getHive();
     LOG.debug("Exporting data to: {}", exportPaths.getExportRootDir());
+    work.acidPostProcess(db);
     TableExport tableExport = new TableExport(
         exportPaths, work.getTableSpec(), work.getReplicationSpec(), db,
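For reference, the JavaUtils filter touched above accepts ACID delta directories by prefix and by parsing the leading write-id block as a number. A self-contained sketch of that check, assuming the conventional delta_0000001_0000001 naming with a 7-digit id block (DELTA_DIGITS_LEN = 7 is an assumption here, not quoted from the patch):

public class DeltaNameFilter {
  static final String DELTA_PREFIX = "delta";
  // Assumed width of a write-id block in delta directory names.
  static final int DELTA_DIGITS_LEN = 7;

  static boolean looksLikeDelta(String name) {
    if (!name.startsWith(DELTA_PREFIX + "_")) {
      return false;
    }
    int from = DELTA_PREFIX.length() + 1;
    int to = Math.min(name.length(), from + DELTA_DIGITS_LEN);
    try {
      Long.parseLong(name.substring(from, to)); // sanity check: the id block is numeric
      return true;
    } catch (NumberFormatException ex) {
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println(looksLikeDelta("delta_0000001_0000001")); // true
    System.out.println(looksLikeDelta("base_0000001"));          // false: the case the patch's //todo asks about
  }
}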
[1/2] hive git commit: HIVE-18739 - Add support for Import/Export from Acid table (Eugene Koifman, reviewed by Sergey Shelukhin)
Repository: hive
Updated Branches:
  refs/heads/branch-3 8584947ef -> a3e535f94

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e535f9/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
index a4df509..4b2f961 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
@@ -32,8 +32,6 @@ import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -191,8 +189,9 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
     checkExpected(rs, expected, "Unexpected row count after ctas from non acid table");
     runStatementOnDriver("insert into " + Table.ACIDTBL + makeValuesClause(values));
+    //todo: try this with acid default - it seem making table acid in listener is too late
     runStatementOnDriver("create table myctas2 stored as ORC TBLPROPERTIES ('transactional" +
-        "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem makeing table acid in listener is too late
+        "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);
     rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID");
     String expected2[][] = {
         {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_001_001_/bucket_0"},
@@ -234,7 +233,7 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
   /**
    * Insert into unbucketed acid table from union all query
-   * Union All is flattend so nested subdirs are created and acid move drops them since
+   * Union All is flattened so nested subdirs are created and acid move drops them since
    * delta dirs have unique names
    */
   @Test
@@ -529,11 +528,26 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     CommandProcessorResponse cpr = runStatementOnDriverNegative("create table myctas " +
         "clustered by (a) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') as " +
         "select a, b from " + Table.NONACIDORCTBL);
-    int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode();//this code doesn't propagate
+    int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(); //this code doesn't propagate
     //Assert.assertEquals("Wrong msg", ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode(), cpr.getErrorCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support"));
   }
   /**
+   * Currently CTAS doesn't support partitioned tables. Correspondingly Acid only supports CTAS for
+   * un-partitioned tables. This test is here to make sure that if CTAS is made to support
+   * un-partitioned tables, that it raises a red flag for Acid.
+   */
+  @Test
+  public void testCtasPartitioned() throws Exception {
+    runStatementOnDriver("insert into " + Table.NONACIDNONBUCKET + "(a,b) values(1,2),(1,3)");
+    CommandProcessorResponse cpr = runStatementOnDriverNegative("create table myctas partitioned " +
+        "by (b int) stored as " +
+        "ORC TBLPROPERTIES ('transactional'='true') as select a, b from " + Table.NONACIDORCTBL);
+    int j = ErrorMsg.CTAS_PARCOL_COEXISTENCE.getErrorCode();//this code doesn't propagate
+    Assert.assertTrue(cpr.getErrorMessage().contains("CREATE-TABLE-AS-SELECT does not support " +
+        "partitioning in the target table"));
+  }
+  /**
    * Tests to check that we are able to use vectorized acid reader,
    * VectorizedOrcAcidRowBatchReader, when reading "original" files,
    * i.e. those that were written before the table was converted to acid.

http://git-wip-us.apache.org/repos/asf/hive/blob/a3e535f9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index b0f64db..fcf34f0 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2033,7 +2033,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
    * @param partition
    * @return
    */
-
hive git commit: HIVE-19242 : CliAdapter silently ignores excluded qfiles (Vihang Karajgaonkar, reviewed by Sahil Takiar)
Repository: hive
Updated Branches:
  refs/heads/master 9f15e22f4 -> 92b9ba7d3

HIVE-19242 : CliAdapter silently ignores excluded qfiles (Vihang Karajgaonkar, reviewed by Sahil Takiar)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/92b9ba7d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/92b9ba7d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/92b9ba7d

Branch: refs/heads/master
Commit: 92b9ba7d3437b12a2cc77db9e463c57cafa5e8c0
Parents: 9f15e22
Author: Vihang Karajgaonkar
Authored: Thu Apr 19 09:10:35 2018 -0700
Committer: Vihang Karajgaonkar
Committed: Thu Apr 19 16:14:03 2018 -0700

--
 .../hadoop/hive/cli/control/AbstractCliConfig.java | 14 --
 1 file changed, 12 insertions(+), 2 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/92b9ba7d/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
--
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
index 01b9ed6..7151372 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
@@ -38,10 +38,13 @@ import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.apache.hive.testutils.HiveTestEnvSetup;

 import com.google.common.base.Splitter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 public abstract class AbstractCliConfig {

   public static final String HIVE_ROOT = HiveTestEnvSetup.HIVE_ROOT;
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractCliConfig.class);

   enum MetastoreType {
     sql
@@ -214,11 +217,18 @@ public abstract class AbstractCliConfig {
     if (queryFile != null && !queryFile.equals("")) {
       // The user may have passed a list of files - comma separated
       for (String qFile : TEST_SPLITTER.split(queryFile)) {
+        File qF;
         if (null != queryDir) {
-          testFiles.add(new File(queryDir, qFile));
+          qF = new File(queryDir, qFile);
         } else {
-          testFiles.add(new File(qFile));
+          qF = new File(qFile);
         }
+        if (excludedQueryFileNames.contains(qFile)) {
+          LOG.warn(qF.getAbsolutePath() + " is among the excluded query files for this driver."
+              + " Please update CliConfigs.java or testconfiguration.properties file to"
+              + " include the qfile");
+        }
+        testFiles.add(qF);
       }
     } else if (queryFileRegex != null && !queryFileRegex.equals("")) {
       for (String regex : TEST_SPLITTER.split(queryFileRegex)) {
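The behavioral change above is small but useful: previously a requested qfile sitting on the driver's excluded list was skipped with no trace; now a warning names the file and the configs to update. A stripped-down model of the new check (plain JDK, illustrative names):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ExcludedFileCheck {
  public static void main(String[] args) {
    Set<String> excludedQueryFileNames = new HashSet<>(Arrays.asList("mapjoin2.q"));
    List<String> requested = Arrays.asList("join1.q", "mapjoin2.q");

    for (String qFile : requested) {
      if (excludedQueryFileNames.contains(qFile)) {
        // The patch emits this kind of warning instead of staying silent.
        System.err.println("WARN: " + qFile + " is among the excluded query files for this driver.");
      }
    }
  }
}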
hive git commit: HIVE-19009 : Retain and use runtime statistics during hs2 lifetime (Zoltan Haindrich via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master 046bc646b -> 9f15e22f4

HIVE-19009 : Retain and use runtime statistics during hs2 lifetime (Zoltan Haindrich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9f15e22f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9f15e22f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9f15e22f

Branch: refs/heads/master
Commit: 9f15e22f4aea99891a37aa1e54d490921e6e1174
Parents: 046bc64
Author: Zoltan Haindrich
Authored: Tue Apr 3 08:51:00 2018 -0700
Committer: Ashutosh Chauhan
Committed: Thu Apr 19 11:44:04 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  11 +-
 .../test/resources/testconfiguration.properties |   1 +
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |  10 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |  12 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   4 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |  12 +-
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |   2 +-
 .../hive/ql/plan/mapper/CachingStatsSource.java |  68 +
 .../hive/ql/plan/mapper/EmptyStatsSource.java   |  11 ++
 .../plan/mapper/SimpleRuntimeStatsSource.java   |   6 +
 .../hadoop/hive/ql/plan/mapper/StatsSource.java |   5 +-
 .../hive/ql/plan/mapper/StatsSources.java       | 122
 .../hive/ql/reexec/IReExecutionPlugin.java      |   1 +
 .../hadoop/hive/ql/reexec/ReExecDriver.java     |  20 ++-
 .../ql/reexec/ReExecutionOverlayPlugin.java     |   4 +
 .../hadoop/hive/ql/reexec/ReOptimizePlugin.java |  48 +--
 .../signature/TestOperatorSignature.java        |   9 +-
 .../ql/plan/mapping/TestCounterMapping.java     |   1 -
 .../queries/clientpositive/runtime_stats_hs2.q  |  22 +++
 .../clientpositive/llap/runtime_stats_hs2.q.out | 141 +++
 20 files changed, 479 insertions(+), 31 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/9f15e22f/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 73492ff..536c7b4 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4263,10 +4263,19 @@ public class HiveConf extends Configuration {
         "comma separated list of plugin can be used:\n"
             + "  overlay: hiveconf subtree 'reexec.overlay' is used as an overlay in case of an execution errors out\n"
             + "  reoptimize: collects operator statistics during execution and recompile the query after a failure"),
+    HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE("hive.query.reexecution.stats.persist.scope", "query",
+        new StringSet("query", "hiveserver", "metastore"),
+        "Sets the persistence scope of runtime statistics\n"
+            + "  query: runtime statistics are only used during re-execution\n"
+            + "  hiveserver: runtime statistics are persisted in the hiveserver - all sessions share it"),
+
     HIVE_QUERY_MAX_REEXECUTION_COUNT("hive.query.reexecution.max.count", 1,
         "Maximum number of re-executions for a single query."),
     HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS("hive.query.reexecution.always.collect.operator.stats", false,
-        "Used during testing"),
+        "If sessionstats are enabled; this option can be used to collect statistics all the time"),
+    HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE("hive.query.reexecution.stats.cache.size", 100_000,
+        "Size of the runtime statistics cache. Unit is: OperatorStat entry; a query plan consist ~100"),
+
     HIVE_QUERY_RESULTS_CACHE_ENABLED("hive.query.results.cache.enabled", true,
         "If the query results cache is enabled. This will keep results of previously executed queries " +

http://git-wip-us.apache.org/repos/asf/hive/blob/9f15e22f/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index d26f0cc..4e7c519 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -480,6 +480,7 @@ minillaplocal.query.files=\
   retry_failure.q,\
   retry_failure_stat_changes.q,\
   retry_failure_oom.q,\
+  runtime_stats_hs2.q,\
   bucketsortoptimize_insert_2.q,\
   check_constraint.q,\
   cbo_gby.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/9f15e22f/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
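The "hiveserver" persistence scope added above implies a process-wide store of operator runtime statistics, bounded by hive.query.reexecution.stats.cache.size. A hedged sketch of that shape using Guava's bounded cache (names are illustrative; this is not the commit's CachingStatsSource):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class RuntimeStatsCache {
  // Stand-in for one operator runtime statistics entry.
  static final class OpStat {
    final long outputRowCount;
    OpStat(long outputRowCount) { this.outputRowCount = outputRowCount; }
  }

  private final Cache<String, OpStat> cache;

  RuntimeStatsCache(long maxEntries) {
    // Bounded the way hive.query.reexecution.stats.cache.size bounds the real cache.
    this.cache = CacheBuilder.newBuilder().maximumSize(maxEntries).build();
  }

  void put(String operatorSignature, OpStat stat) {
    cache.put(operatorSignature, stat);
  }

  OpStat lookup(String operatorSignature) {
    return cache.getIfPresent(operatorSignature); // null on a miss
  }

  public static void main(String[] args) {
    RuntimeStatsCache stats = new RuntimeStatsCache(100_000); // mirrors the new conf default
    stats.put("TS[0]@web_sales", new OpStat(42));
    System.out.println(stats.lookup("TS[0]@web_sales").outputRowCount); // 42
  }
}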
hive git commit: HIVE-19246 : Update golden files for negative tests
Repository: hive
Updated Branches:
  refs/heads/master fbb186cd0 -> 046bc646b

HIVE-19246 : Update golden files for negative tests

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/046bc646
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/046bc646
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/046bc646

Branch: refs/heads/master
Commit: 046bc646bd3f42637ba6aba22d9699a14133eeb8
Parents: fbb186c
Author: Ashutosh Chauhan
Authored: Thu Apr 19 10:58:28 2018 -0700
Committer: Ashutosh Chauhan
Committed: Thu Apr 19 10:58:28 2018 -0700

--
 ql/src/test/results/clientnegative/avro_non_nullable_union.q.out   | 1 +
 ql/src/test/results/clientnegative/cachingprintstream.q.out        | 2 ++
 ql/src/test/results/clientnegative/compute_stats_long.q.out        | 1 +
 ql/src/test/results/clientnegative/dyn_part3.q.out                 | 1 +
 ql/src/test/results/clientnegative/dyn_part_max_per_node.q.out     | 1 +
 .../results/clientnegative/dynamic_partitions_with_whitelist.q.out | 1 +
 ql/src/test/results/clientnegative/insertsel_fail.q.out            | 2 +-
 ql/src/test/results/clientnegative/local_mapred_error_cache.q.out  | 1 +
 ql/src/test/results/clientnegative/script_broken_pipe2.q.out       | 1 +
 ql/src/test/results/clientnegative/script_broken_pipe3.q.out       | 1 +
 ql/src/test/results/clientnegative/script_error.q.out              | 1 +
 ql/src/test/results/clientnegative/serde_regex2.q.out              | 1 +
 ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out  | 1 +
 ql/src/test/results/clientnegative/stats_publisher_error_1.q.out   | 1 +
 ql/src/test/results/clientnegative/stats_publisher_error_2.q.out   | 1 +
 ql/src/test/results/clientnegative/subquery_corr_in_agg.q.out      | 1 +
 ql/src/test/results/clientnegative/subquery_in_implicit_gby.q.out  | 1 +
 .../test/results/clientnegative/subquery_notin_implicit_gby.q.out  | 1 +
 .../results/clientnegative/subquery_scalar_corr_multi_rows.q.out   | 1 +
 .../test/results/clientnegative/subquery_scalar_multi_rows.q.out   | 1 +
 ql/src/test/results/clientnegative/udf_assert_true.q.out           | 1 +
 ql/src/test/results/clientnegative/udf_assert_true2.q.out          | 1 +
 ql/src/test/results/clientnegative/udf_reflect_neg.q.out           | 1 +
 ql/src/test/results/clientnegative/udf_test_error.q.out            | 1 +
 ql/src/test/results/clientnegative/udf_test_error_reduce.q.out     | 1 +
 25 files changed, 26 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/046bc646/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out
--
diff --git a/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out b/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out
index c933081..0149e9f 100644
--- a/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out
+++ b/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out
@@ -35,4 +35,5 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@union_nullable_test_text
 PREHOOK: Output: default@union_non_nullable_test_avro
 A masked pattern was here
+Error during job, obtaining debugging information...
 FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask

http://git-wip-us.apache.org/repos/asf/hive/blob/046bc646/ql/src/test/results/clientnegative/cachingprintstream.q.out
--
diff --git a/ql/src/test/results/clientnegative/cachingprintstream.q.out b/ql/src/test/results/clientnegative/cachingprintstream.q.out
index 0acb772..107811b 100644
--- a/ql/src/test/results/clientnegative/cachingprintstream.q.out
+++ b/ql/src/test/results/clientnegative/cachingprintstream.q.out
@@ -8,10 +8,12 @@ PREHOOK: query: FROM src SELECT TRANSFORM (key, value) USING 'FAKE_SCRIPT_SHOULD
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 A masked pattern was here
+Error during job, obtaining debugging information...
 Begin cached logs.
 PREHOOK: query: FROM src SELECT TRANSFORM (key, value) USING 'FAKE_SCRIPT_SHOULD_NOT_EXIST' AS key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 A masked pattern was here
+Error during job, obtaining debugging information...
 End cached logs.
 FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask

http://git-wip-us.apache.org/repos/asf/hive/blob/046bc646/ql/src/test/results/clientnegative/compute_stats_long.q.out
--
diff --git a/ql/src/test/results/clientnegative/compute_stats_long.q.out b/ql/src/test/results/clientnegative/compute_stats_long.q.out
index 79f2146..0f7cbd6 10
hive git commit: HIVE-19245: Add Apache license to TestSparkPlan.java (Sahil Takiar, reviewed by Vihang Karajgaonkar)
Repository: hive
Updated Branches:
  refs/heads/master 1f25c46a2 -> fbb186cd0

HIVE-19245: Add Apache license to TestSparkPlan.java (Sahil Takiar, reviewed by Vihang Karajgaonkar)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fbb186cd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fbb186cd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fbb186cd

Branch: refs/heads/master
Commit: fbb186cd0662328b26f3283190347466bc7fda42
Parents: 1f25c46
Author: Sahil Takiar
Authored: Thu Apr 19 11:19:42 2018 -0500
Committer: Sahil Takiar
Committed: Thu Apr 19 11:19:42 2018 -0500

--
 .../hadoop/hive/ql/exec/spark/TestSparkPlan.java | 18 ++
 1 file changed, 18 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/fbb186cd/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
index 3fe32a0..5f47bb4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkPlan.java
@@ -1,3 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hive.ql.exec.spark;

 import org.apache.hadoop.fs.FileSystem;
hive git commit: HIVE-19158: Fix NPE in the HiveMetastore add partition tests (Marta Kuczora, reviewed by Peter Vary and Sahil Takiar)
Repository: hive
Updated Branches:
  refs/heads/master fb22f576d -> 1f25c46a2

HIVE-19158: Fix NPE in the HiveMetastore add partition tests (Marta Kuczora, reviewed by Peter Vary and Sahil Takiar)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1f25c46a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1f25c46a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1f25c46a

Branch: refs/heads/master
Commit: 1f25c46a2bf50483e09c756803d78e078dc37b92
Parents: fb22f57
Author: Marta Kuczora
Authored: Thu Apr 19 11:06:45 2018 -0500
Committer: Sahil Takiar
Committed: Thu Apr 19 11:06:45 2018 -0500

--
 .../hadoop/hive/metastore/HiveMetaStore.java    | 26 +-
 .../hive/metastore/HiveMetaStoreClient.java     | 11 ++-
 .../spec/CompositePartitionSpecProxy.java       |  6 +-
 .../spec/PartitionListComposingSpecProxy.java   | 22 -
 .../partition/spec/PartitionSpecProxy.java      |  6 +-
 .../spec/PartitionSpecWithSharedSDProxy.java    |  5 +-
 .../metastore/client/TestAddPartitions.java     | 71 ++--
 .../client/TestAddPartitionsFromPartSpec.java   | 89 ++--
 8 files changed, 116 insertions(+), 120 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/1f25c46a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ae9ec5c..9c88cf9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -3250,6 +3250,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             part.getTableName(), part.toString());
         throw new MetaException(errorMsg);
       }
+      if (part.getValues() == null || part.getValues().isEmpty()) {
+        throw new MetaException("The partition values cannot be null or empty.");
+      }
+      if (part.getValues().contains(null)) {
+        throw new MetaException("Partition value cannot be null.");
+      }

       boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
       if (!shouldAdd) {
@@ -3410,7 +3416,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public int add_partitions(final List parts) throws MetaException, InvalidObjectException, AlreadyExistsException {
       startFunction("add_partition");
-      if (parts.size() == 0) {
+      if (parts == null) {
+        throw new MetaException("Partition list cannot be null.");
+      }
+      if (parts.isEmpty()) {
         return 0;
       }
@@ -3471,6 +3480,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         boolean ifNotExists) throws TException {
       boolean success = false;
+      if (dbName == null || tblName == null) {
+        throw new MetaException("The database and table name cannot be null.");
+      }
       // Ensures that the list doesn't have dups, and keeps track of directories we have created.
       final Map addedPartitions = new ConcurrentHashMap<>();
       PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partSpecs);
@@ -3496,12 +3508,18 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       // will be created if the list contains an invalid partition.
         final Partition part = partitionIterator.getCurrent();
+        if (part.getDbName() == null || part.getTableName() == null) {
+          throw new MetaException("The database and table name must be set in the partition.");
+        }
         if (!part.getTableName().equalsIgnoreCase(tblName) || !part.getDbName().equalsIgnoreCase(dbName)) {
          String errorMsg = String.format(
              "Partition does not belong to target table %s.%s. It belongs to the table %s.%s : %s",
              dbName, tblName, part.getDbName(), part.getTableName(), part.toString());
          throw new MetaException(errorMsg);
        }
+        if (part.getValues() == null || part.getValues().isEmpty()) {
+          throw new MetaException("The partition values cannot be null or empty.");
+        }

         boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
         if (!shouldAdd) {
@@ -3733,6 +3751,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       firePreEvent(new PreAddPartitionEvent(tbl, part, this));

+      if (part.getValues() == null || part.getValues().isEmpty()) {
+        throw new Me
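The pattern of the fix above: validate partition inputs eagerly at the metastore boundary and fail with a descriptive MetaException instead of letting a null surface later as an NPE deep in the add-partition path. A compact stand-alone version of the value checks (with IllegalArgumentException standing in for Hive's MetaException):

import java.util.Arrays;
import java.util.List;

public class PartitionValidation {
  // Same checks the patch adds in front of add_partitions().
  static void validatePartitionValues(List<String> values) {
    if (values == null || values.isEmpty()) {
      throw new IllegalArgumentException("The partition values cannot be null or empty.");
    }
    if (values.contains(null)) {
      throw new IllegalArgumentException("Partition value cannot be null.");
    }
  }

  public static void main(String[] args) {
    validatePartitionValues(Arrays.asList("2018", "04")); // fine
    try {
      validatePartitionValues(Arrays.asList("2018", null));
    } catch (IllegalArgumentException e) {
      // A clear message at the boundary instead of an NPE deep in the call chain.
      System.out.println(e.getMessage());
    }
  }
}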
hive git commit: HIVE-19243 : Upgrade hadoop.version to 3.1.0 (Gour Saha via Sahil Takiar)
Repository: hive
Updated Branches:
  refs/heads/master 5ae174cb2 -> fb22f576d

HIVE-19243 : Upgrade hadoop.version to 3.1.0 (Gour Saha via Sahil Takiar)

Signed-off-by: Ashutosh Chauhan

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fb22f576
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fb22f576
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fb22f576

Branch: refs/heads/master
Commit: fb22f576da7383f9cb1d24b66f4090d07b6bde07
Parents: 5ae174c
Author: Gour Saha
Authored: Thu Apr 19 07:18:21 2018 -0700
Committer: Ashutosh Chauhan
Committed: Thu Apr 19 07:18:21 2018 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/fb22f576/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 4fb83c9..21ce5cb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -145,7 +145,7 @@
 19.0
 2.4.11
 1.3.166
-3.0.0
+3.1.0
 ${basedir}/${hive.path.to.root}/testutils/hadoop
 1.3
 2.0.0-alpha4