http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
index 0e6ec84..a02baf9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
@@ -81,7 +81,12 @@ public class HiveIndexedInputFormat extends HiveInputFormat {
     // class
     Class inputFormatClass = part.getInputFileFormatClass();
     InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
-    Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), newjob);
+
+    try {
+      Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), newjob);
+    } catch (HiveException e) {
+      throw new IOException(e);
+    }
     FileInputFormat.setInputPaths(newjob, dir);
     newjob.setInputFormat(inputFormat.getClass());
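A note on the pattern above, which recurs at several call sites in this patch: Utilities.copyTableJobPropertiesToConf now declares a checked HiveException (the change to Utilities itself is not shown in this section), while the InputFormat entry points that call it may only throw IOException, so each call site rewraps the exception and keeps the original as the cause. A minimal sketch of the idiom, with loadTableProperties as a hypothetical stand-in for the Hive call:

import java.io.IOException;

import org.apache.hadoop.hive.ql.metadata.HiveException;

public class CheckedRewrapSketch {

  // Hypothetical stand-in for Utilities.copyTableJobPropertiesToConf: any
  // helper that now declares the checked HiveException.
  private static void loadTableProperties() throws HiveException {
    throw new HiveException("storage handler could not supply job properties");
  }

  // Methods bound by the InputFormat contract can only surface IOException,
  // so the HiveException is wrapped rather than propagated.
  public static void getSplits() throws IOException {
    try {
      loadTableProperties();
    } catch (HiveException e) {
      throw new IOException(e);
    }
  }
}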
http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 010b88c..21394c6 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -357,9 +357,13 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       LOG.debug("Found spec for " + hsplit.getPath() + " " + part + " from " + pathToPartitionInfo);
     }

-    if ((part != null) && (part.getTableDesc() != null)) {
-      Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), job);
-      nonNative = part.getTableDesc().isNonNative();
+    try {
+      if ((part != null) && (part.getTableDesc() != null)) {
+        Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), job);
+        nonNative = part.getTableDesc().isNonNative();
+      }
+    } catch (HiveException e) {
+      throw new IOException(e);
     }

     Path splitPath = hsplit.getPath();
@@ -419,7 +423,11 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
       InputFormat inputFormat, Class<? extends InputFormat> inputFormatClass, int splits,
       TableDesc table, List<InputSplit> result) throws IOException {

-    Utilities.copyTablePropertiesToConf(table, conf);
+    try {
+      Utilities.copyTablePropertiesToConf(table, conf);
+    } catch (HiveException e) {
+      throw new IOException(e);
+    }

     if (tableScan != null) {
       pushFilters(conf, tableScan);

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
index 68407f5..42f9b66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
@@ -183,9 +183,14 @@ public class ProjectionPusher {
     final JobConf cloneJobConf = new JobConf(jobConf);
     final PartitionDesc part = pathToPartitionInfo.get(path);

-    if ((part != null) && (part.getTableDesc() != null)) {
-      Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), cloneJobConf);
+    try {
+      if ((part != null) && (part.getTableDesc() != null)) {
+        Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), cloneJobConf);
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
     }
+
     pushProjectionsAndFilters(cloneJobConf, path.toString(), path.toUri().getPath());
     return cloneJobConf;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
index 82b78b8..e87a96d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultStorageHandler.java
@@ -93,6 +93,11 @@ public class DefaultStorageHandler implements HiveStorageHandler {
   }

   @Override
+  public void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> secrets) {
+    //do nothing by default
+  }
+
+  @Override
   public Configuration getConf() {
     return conf;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index 5975d0c..bd8c60a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -99,6 +99,12 @@ public interface HiveStorageHandler extends Configurable {
       Map<String, String> jobProperties);

   /**
+   * This method is called to allow the StorageHandlers the chance to
+   * populate secret keys into the job's credentials.
+   */
+  public abstract void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> secrets);
+
+  /**
    * This method is called to allow the StorageHandlers the chance
    * to populate the JobContext.getConfiguration() with properties that
    * maybe be needed by the handler's bundled artifacts (ie InputFormat, SerDe, etc).

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
index 4ca8329..9a0a74d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
@@ -116,7 +116,6 @@ public class UnparseTranslator {
       if (existingEntry.getValue().tokenStopIndex <= tokenStopIndex &&
           existingEntry.getKey() >= tokenStartIndex) {
         // Collect newer entry is if a super-set of existing entry,
-        assert (replacementText.contains(existingEntry.getValue().replacementText));
         subsetEntries.add(existingEntry.getKey());
         // check if the existing entry contains the new
       } else if (existingEntry.getValue().tokenStopIndex >= tokenStopIndex &&

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
index 68dcd0d..157a697 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
@@ -116,7 +116,11 @@ public class PartitionDesc implements Serializable, Cloneable {
   private void PartitionDescConstructorHelper(final Partition part,final TableDesc tblDesc, boolean setInputFileFormat)
     throws HiveException {
+
+    PlanUtils.configureInputJobPropertiesForStorageHandler(tblDesc);
+
     this.tableDesc = tblDesc;
+
     setPartSpec(part.getSpec());
     if (setInputFileFormat) {
       setInputFileFormatClass(part.getInputFormatClass());
@@ -367,7 +371,6 @@ public class PartitionDesc implements Serializable, Cloneable {
    * URI to the partition file
    */
   public void deriveBaseFileName(Path path) {
-    PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc);

     if (path == null) {
       return;
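To make the new configureInputJobCredentials hook concrete, here is a hypothetical handler built on the DefaultStorageHandler base class changed above. The class name and the example.jdbc.password property are invented for illustration; only the overridden signature comes from the interface:

import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.hive.ql.plan.TableDesc;

// Hypothetical handler that reads a password from the table properties and
// publishes it through the credentials hook. Routing it through
// configureInputJobProperties instead would copy it into the JobConf, where
// it is visible as plain configuration.
public class ExampleJdbcSecretHandler extends DefaultStorageHandler {

  @Override
  public void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> secrets) {
    String password = tableDesc.getProperties().getProperty("example.jdbc.password");
    if (password != null) {
      // The map key is the alias under which the consuming input format
      // would later look the secret up.
      secrets.put("example.jdbc.password", password);
    }
  }
}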
http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 14f2a12..d82973c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -896,6 +896,7 @@ public final class PlanUtils {
           org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE));
       if (storageHandler != null) {
         Map<String, String> jobProperties = new LinkedHashMap<String, String>();
+        Map<String, String> jobSecrets = new LinkedHashMap<String, String>();
         if(input) {
           try {
             storageHandler.configureInputJobProperties(
@@ -906,6 +907,15 @@ public final class PlanUtils {
                 "using configureTableJobProperties",e);
             storageHandler.configureTableJobProperties(tableDesc, jobProperties);
           }
+
+          try{
+            storageHandler.configureInputJobCredentials(
+                tableDesc,
+                jobSecrets);
+          } catch(AbstractMethodError e) {
+            // ignore
+            LOG.info("configureInputJobSecrets not found");
+          }
         }
         else {
           try {
@@ -924,6 +934,11 @@ public final class PlanUtils {
         if (!jobProperties.isEmpty()) {
           tableDesc.setJobProperties(jobProperties);
         }
+
+        // same idea, only set for non-native tables
+        if (!jobSecrets.isEmpty()) {
+          tableDesc.setJobSecrets(jobSecrets);
+        }
       }
     } catch (HiveException ex) {
       throw new RuntimeException(ex);

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
index 117aa14..8b7339d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
@@ -53,6 +53,7 @@ public class TableDesc implements Serializable, Cloneable {
   private Class<? extends OutputFormat> outputFileFormatClass;
   private java.util.Properties properties;
   private Map<String, String> jobProperties;
+  private Map<String, String> jobSecrets;

   public TableDesc() {
   }
@@ -143,6 +144,14 @@ public class TableDesc implements Serializable, Cloneable {
     return jobProperties;
   }

+  public void setJobSecrets(Map<String, String> jobSecrets) {
+    this.jobSecrets = jobSecrets;
+  }
+
+  public Map<String, String> getJobSecrets() {
+    return jobSecrets;
+  }
+
   /**
    * @return the serdeClassName
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
index ed88725..18f77e0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
@@ -97,7 +97,7 @@ public final class OpProcFactory {
   protected static final Logger LOG = LoggerFactory.getLogger(OpProcFactory.class
       .getName());

-  private static ExprWalkerInfo getChildWalkerInfo(Operator<?> current, OpWalkerInfo owi) {
+  private static ExprWalkerInfo getChildWalkerInfo(Operator<?> current, OpWalkerInfo owi) throws SemanticException {
     if (current.getNumChild() == 0) {
       return null;
     }
@@ -875,7 +875,7 @@ public final class OpProcFactory {
   }

   protected static Object createFilter(Operator op,
-      ExprWalkerInfo pushDownPreds, OpWalkerInfo owi) {
+      ExprWalkerInfo pushDownPreds, OpWalkerInfo owi) throws SemanticException {
     if (pushDownPreds != null && pushDownPreds.hasAnyCandidates()) {
       return createFilter(op, pushDownPreds.getFinalCandidates(), owi);
     }
@@ -883,7 +883,7 @@ public final class OpProcFactory {
   }

   protected static Object createFilter(Operator op,
-      Map<String, List<ExprNodeDesc>> predicates, OpWalkerInfo owi) {
+      Map<String, List<ExprNodeDesc>> predicates, OpWalkerInfo owi) throws SemanticException {
     RowSchema inputRS = op.getSchema();

     // combine all predicates into a single expression
@@ -970,7 +970,7 @@ public final class OpProcFactory {
       TableScanOperator tableScanOp,
       ExprNodeGenericFuncDesc originalPredicate,
       OpWalkerInfo owi,
-      HiveConf hiveConf) {
+      HiveConf hiveConf) throws SemanticException {

     TableScanDesc tableScanDesc = tableScanOp.getConf();
     Table tbl = tableScanDesc.getTableMetadata();
@@ -997,9 +997,15 @@ public final class OpProcFactory {
     JobConf jobConf = new JobConf(owi.getParseContext().getConf());
     Utilities.setColumnNameList(jobConf, tableScanOp);
     Utilities.setColumnTypeList(jobConf, tableScanOp);
-    Utilities.copyTableJobPropertiesToConf(
-        Utilities.getTableDesc(tbl),
-        jobConf);
+
+    try {
+      Utilities.copyTableJobPropertiesToConf(
+          Utilities.getTableDesc(tbl),
+          jobConf);
+    } catch (Exception e) {
+      throw new SemanticException(e);
+    }
+
     Deserializer deserializer = tbl.getDeserializer();
     HiveStoragePredicateHandler.DecomposedPredicate decomposed =
         predicateHandler.decomposePredicate(
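Two details in the PlanUtils and TableDesc changes above deserve a comment. First, configureInputJobCredentials is a new abstract method on HiveStorageHandler, so a handler compiled against the old interface raises AbstractMethodError when it is invoked; the catch in PlanUtils downgrades that to "no secrets contributed" instead of a hard failure. Second, the collected map is stored through tableDesc.setJobSecrets rather than merged into jobProperties, keeping secrets out of the plan's plain properties. How the secrets are ultimately consumed is outside this section, but the natural sink is the job's Credentials object, which Hadoop distributes to tasks without writing it into the visible configuration. A sketch of what that plumbing might look like (the helper class is hypothetical):

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;

public final class JobSecretsSketch {

  private JobSecretsSketch() {
  }

  // Copies the secrets collected by PlanUtils into the job's Credentials.
  // Unlike jobProperties, which land in the JobConf as ordinary key/value
  // pairs, Credentials travel with the job outside the visible configuration.
  public static void addSecretsToJob(TableDesc tableDesc, JobConf job) {
    Map<String, String> secrets = tableDesc.getJobSecrets();
    if (secrets == null) {
      return;
    }
    for (Map.Entry<String, String> entry : secrets.entrySet()) {
      job.getCredentials().addSecretKey(
          new Text(entry.getKey()),
          entry.getValue().getBytes(StandardCharsets.UTF_8));
    }
  }
}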
http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/test/org/apache/hadoop/hive/ql/exec/InputEstimatorTestClass.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/InputEstimatorTestClass.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/InputEstimatorTestClass.java
index 8c52979..f3f4388 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/InputEstimatorTestClass.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/InputEstimatorTestClass.java
@@ -75,6 +75,11 @@ public class InputEstimatorTestClass implements HiveStorageHandler, InputEstimat
   }

   @Override
+  public void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> jobProperties) {
+
+  }
+
+  @Override
   public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/test/queries/clientpositive/jdbc_handler.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/jdbc_handler.q b/ql/src/test/queries/clientpositive/jdbc_handler.q
index a37e547..847f577 100644
--- a/ql/src/test/queries/clientpositive/jdbc_handler.q
+++ b/ql/src/test/queries/clientpositive/jdbc_handler.q
@@ -9,31 +9,24 @@ owner STRING
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
+"hive.sql.database.type" = "METASTORE",
 "hive.sql.query" = "SELECT TBL_ID, DB_ID, TBL_NAME, TBL_TYPE, OWNER FROM TBLS",
-"hive.sql.column.mapping" = "id=TBL_ID, db_id=DB_ID, name=TBL_NAME, type=TBL_TYPE, owner=OWNER",
-"hive.sql.dbcp.maxActive" = "1"
+"hive.sql.column.mapping" = "id=TBL_ID, db_id=DB_ID, name=TBL_NAME, type=TBL_TYPE, owner=OWNER"
 );

 CREATE EXTERNAL TABLE dbs
 (
-id int,
-name STRING
+DB_ID int,
+NAME STRING
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
-"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS",
-"hive.sql.column.mapping" = "id=DB_ID, name=NAME",
-"hive.sql.dbcp.maxActive" = "1"
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS"
 );

-select tables.name as tn, dbs.name as dn, tables.type as t
-from tables join dbs on (tables.db_id = dbs.id) order by tn, dn, t;
+select tables.name as tn, dbs.NAME as dn, tables.type as t
+from tables join dbs on (tables.db_id = dbs.DB_ID) order by tn, dn, t;

 explain
 select
http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/test/queries/clientpositive/sysdb.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sysdb.q b/ql/src/test/queries/clientpositive/sysdb.q
new file mode 100644
index 0000000..d94a164
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/sysdb.q
@@ -0,0 +1,117 @@
+set hive.strict.checks.cartesian.product=false;
+
+set hive.compute.query.using.stats=false;
+
+set hive.support.concurrency=true;
+
+set hive.cbo.enable=false;
+
+create table src_buck (key int, value string) clustered by(value) into 2 buckets;
+
+create table src_skew (key int) skewed by (key) on (1,2,3);
+
+CREATE TABLE scr_txn (key int, value string)
+    CLUSTERED BY (key) INTO 2 BUCKETS STORED AS ORC
+    TBLPROPERTIES (
+      "transactional"="true",
+      "compactor.mapreduce.map.memory.mb"="2048",
+      "compactorthreshold.hive.compactor.delta.num.threshold"="4",
+      "compactorthreshold.hive.compactor.delta.pct.threshold"="0.5");
+
+CREATE TEMPORARY TABLE src_tmp (key int, value string);
+
+CREATE TABLE moretypes (a decimal(10,2), b tinyint, c smallint, d int, e bigint, f varchar(10), g char(3));
+
+show grant user hive_test_user;
+
+source ../../metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql;
+
+use sys;
+
+select bucket_col_name, integer_idx from bucketing_cols order by bucket_col_name, integer_idx limit 5;
+
+select count(*) from cds;
+
+select column_name, type_name, integer_idx from columns_v2 order by column_name, integer_idx limit 5;
+
+select param_key, param_value from database_params order by param_key, param_value limit 5;
+
+select db_location_uri, name, owner_name, owner_type from dbs order by name;
+
+select grantor, principal_name from db_privs order by grantor, principal_name limit 5;
+
+select grantor, principal_name from global_privs order by grantor, principal_name limit 5;
+
+select index_name, index_handler_class from idxs order by index_name limit 5;
+
+select param_key, param_value from index_params order by param_key, param_value limit 5;
+
+select part_name from partitions order by part_name limit 5;
+
+select pkey_name, pkey_type from partition_keys order by pkey_name limit 5;
+
+select part_key_val, integer_idx from partition_key_vals order by part_key_val, integer_idx limit 5;
+
+select param_key, param_value from partition_params order by param_key, param_value limit 5;
+
+select grantor, principal_name from part_col_privs order by grantor, principal_name limit 5;
+
+select grantor, principal_name from part_privs order by grantor, principal_name limit 5;
+
+select role_name from roles order by role_name limit 5;
+
+select principal_name, grantor from role_map order by principal_name, grantor limit 5;
+
+select count(*) from sds;
+
+select param_key, param_value from sd_params order by param_key, param_value limit 5;
+
+select sequence_name from sequence_table order by sequence_name limit 5;
+
+select name, slib from serdes order by name, slib limit 5;
+
+select param_key, param_value from serde_params order by param_key, param_value limit 5;
+
+select skewed_col_name from skewed_col_names order by skewed_col_name limit 5;
+
+select count(*) from skewed_col_value_loc_map;
+
+select count(*) from skewed_string_list;
+
+select count(*) from skewed_string_list_values;
+
+select count(*) from skewed_values;
+
+select column_name, `order` from sort_cols order by column_name limit 5;
+
+select param_key, param_value from table_params order by param_key, param_value limit 5;
+
+select tbl_name from tbls order by tbl_name limit 5;
+
+select column_name, grantor, principal_name from tbl_col_privs order by column_name, principal_name limit 5;
+
+select grantor, principal_name from tbl_privs order by grantor, principal_name limit 5;
+
+select table_name, column_name, num_nulls, num_distincts from tab_col_stats order by table_name, column_name limit 10;
+
+select table_name, partition_name, column_name, num_nulls, num_distincts from part_col_stats order by table_name, partition_name, column_name limit 10;
+
+select schema_version from version order by schema_version limit 5;
+
+select func_name, func_type from funcs order by func_name, func_type limit 5;
+
+select constraint_name from key_constraints order by constraint_name limit 5;
+
+use INFORMATION_SCHEMA;
+
+select count(*) from SCHEMATA;
+
+select * from TABLES order by TABLE_SCHEMA, TABLE_NAME;
+
+select * from TABLE_PRIVILEGES order by GRANTOR, GRANTEE, TABLE_SCHEMA, TABLE_NAME limit 10;
+
+select * from COLUMNS where TABLE_NAME = 'alltypesorc' or TABLE_NAME = 'moretypes' order by TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION ;
+
+select * from COLUMN_PRIVILEGES order by GRANTOR, GRANTEE, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME limit 10;
+
+select TABLE_SCHEMA, TABLE_NAME from views order by TABLE_SCHEMA, TABLE_NAME;
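The new sysdb.q test drives the SYS and INFORMATION_SCHEMA databases that hive-schema-3.0.0.hive.sql defines on top of the metastore, using the JdbcStorageHandler METASTORE connection type introduced above. Outside the q-test harness the same catalogs are reachable from any HiveServer2 client; a sketch in plain JDBC, where the connection URL assumes a local, unsecured HiveServer2:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SysDbQuerySketch {

  public static void main(String[] args) throws Exception {
    // Assumed local HiveServer2 endpoint; adjust host, port and auth for a
    // real cluster. Requires the Hive JDBC driver on the classpath.
    try (Connection conn =
             DriverManager.getConnection("jdbc:hive2://localhost:10000/sys");
         Statement stmt = conn.createStatement();
         ResultSet rs =
             stmt.executeQuery("select tbl_name from tbls order by tbl_name limit 5")) {
      while (rs.next()) {
        System.out.println(rs.getString(1));
      }
    }
  }
}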
http://git-wip-us.apache.org/repos/asf/hive/blob/77f44b66/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
index 483b7f9..7c428e8 100644
--- a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
+++ b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
@@ -8,12 +8,9 @@ type STRING,
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
+"hive.sql.database.type" = "METASTORE",
 "hive.sql.query" = "SELECT TBL_ID, DB_ID, TBL_NAME, TBL_TYPE, OWNER FROM TBLS",
 #### A masked pattern was here ####
-"hive.sql.dbcp.maxActive" = "1"
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -28,58 +25,47 @@ type STRING,
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
+"hive.sql.database.type" = "METASTORE",
 "hive.sql.query" = "SELECT TBL_ID, DB_ID, TBL_NAME, TBL_TYPE, OWNER FROM TBLS",
 #### A masked pattern was here ####
-"hive.sql.dbcp.maxActive" = "1"
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tables
 PREHOOK: query: CREATE EXTERNAL TABLE dbs
 (
-id int,
-name STRING
+DB_ID int,
+NAME STRING
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
-"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS",
-"hive.sql.column.mapping" = "id=DB_ID, name=NAME",
-"hive.sql.dbcp.maxActive" = "1"
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS"
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dbs
 POSTHOOK: query: CREATE EXTERNAL TABLE dbs
 (
-id int,
-name STRING
+DB_ID int,
+NAME STRING
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
-"hive.sql.database.type" = "DERBY",
-"hive.sql.jdbc.url" = "jdbc:derby:;databaseName=${test.tmp.dir}/junit_metastore_db;create=true",
-"hive.sql.jdbc.driver" = "org.apache.derby.jdbc.EmbeddedDriver",
-"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS",
-"hive.sql.column.mapping" = "id=DB_ID, name=NAME",
-"hive.sql.dbcp.maxActive" = "1"
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" = "SELECT DB_ID, NAME FROM DBS"
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dbs
-PREHOOK: query: select tables.name as tn, dbs.name as dn, tables.type as t
-from tables join dbs on (tables.db_id = dbs.id) order by tn, dn, t
+PREHOOK: query: select tables.name as tn, dbs.NAME as dn, tables.type as t
+from tables join dbs on (tables.db_id = dbs.DB_ID) order by tn, dn, t
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dbs
 PREHOOK: Input: default@tables
 #### A masked pattern was here ####
-POSTHOOK: query: select tables.name as tn, dbs.name as dn, tables.type as t
-from tables join dbs on (tables.db_id = dbs.id) order by tn, dn, t
+POSTHOOK: query: select tables.name as tn, dbs.NAME as dn, tables.type as t
+from tables join dbs on (tables.db_id = dbs.DB_ID) order by tn, dn, t
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dbs
 POSTHOOK: Input: default@tables