hive git commit: HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 (Alan Gates, reviewed by Vihang Karajgaonkar)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3 2334a0ddf -> 6d31e4d54


HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 (Alan Gates, 
reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6d31e4d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6d31e4d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6d31e4d5

Branch: refs/heads/branch-3
Commit: 6d31e4d54c9bc5c4df35b17efe8f83d63a624b07
Parents: 2334a0d
Author: Alan Gates 
Authored: Tue May 29 16:58:47 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 16:58:47 2018 -0700

--
 itests/hive-unit/pom.xml   |  2 +-
 packaging/src/main/assembly/bin.xml|  3 +++
 .../src/main/sql/mssql/hive-schema-3.1.0.mssql.sql |  4 ++--
 .../main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql|  2 +-
 .../src/main/sql/mysql/hive-schema-3.1.0.mysql.sql |  2 +-
 .../src/main/sql/oracle/hive-schema-3.1.0.oracle.sql   |  2 +-
 .../main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql  |  3 +--
 .../main/sql/postgres/hive-schema-3.1.0.postgres.sql   |  4 ++--
 .../sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql   | 13 ++---
 9 files changed, 18 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6d31e4d5/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 26e423c..bd8ea60 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -561,7 +561,7 @@
 
   
 
-
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hive/blob/6d31e4d5/packaging/src/main/assembly/bin.xml
--
diff --git a/packaging/src/main/assembly/bin.xml 
b/packaging/src/main/assembly/bin.xml
index 5d934ac..b7f6809 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -214,6 +214,9 @@
   
 **/*
   
+  
+**/upgrade.order.* 
+  
   scripts/metastore/upgrade
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6d31e4d5/standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql 
b/standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
index 5b52320..1bb3c1a 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
@@ -763,7 +763,7 @@ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT 
PART_COL_PRIVS_FK1 FOREIGN KEY (PART_I
 
 CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
 
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS 
(AUTHORIZE,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS 
(AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
 
 
 -- Constraints for table DB_PRIVS for class(es) 
[org.apache.hadoop.hive.metastore.model.MDBPrivilege]
@@ -1249,4 +1249,4 @@ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON 
RUNTIME_STATS(CREATE_TIME);
 -- -
 -- Record schema version. Should be the last step in the init script
 -- -
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, 
'3.0.0', 'Hive release version 3.0.0');
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, 
'3.1.0', 'Hive release version 3.1.0');
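
A quick way to confirm the version bump took effect after running the install
script is to read the row back; a minimal sketch against the VERSION table as
defined in this script:

SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;
-- expected result: 1, '3.1.0', 'Hive release version 3.1.0'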

http://git-wip-us.apache.org/repos/asf/hive/blob/6d31e4d5/standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql 
b/standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
index 4a35426..d3f2794 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
@@ -23,7 +23,7 @@ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS 
(AUTHORIZER,TBL_ID,"COLU
 
 ALTER TABLE PART_COL_PRIVS ADD AUTHORIZER nvarchar(128) NULL;
 DROP INDEX PART_COL_PRIVS.PARTITIONCOLUMNPRIVILEGEINDEX;
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS 
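
Going by the corrected 3.1.0 schema file earlier in this message, the recreated
index presumably takes the AUTHORIZER-prefixed form (a hedged reconstruction,
not verbatim from this patch):

CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);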

[2/4] hive git commit: HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 and 4.0 (Alan Gates, reviewed by Vihang Karajgaonkar)

2018-05-29 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
--
diff --git 
a/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql 
b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
new file mode 100644
index 000..2877c79
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -0,0 +1,1147 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY 
(SEQUENCE_NAME);
+
+INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES 
('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY 
(CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+"COLUMN_NAME" VARCHAR2(767) NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+GRANT_OPTION NUMBER (5) NOT NULL,
+GRANTOR VARCHAR2(128) NULL,
+GRANTOR_TYPE VARCHAR2(128) NULL,
+PART_ID NUMBER NULL,
+PRINCIPAL_NAME VARCHAR2(128) NULL,
+PRINCIPAL_TYPE VARCHAR2(128) NULL,
+PART_COL_PRIV VARCHAR2(128) NULL,
+AUTHORIZER VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY 
(PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+CD_ID NUMBER NOT NULL,
+"COMMENT" VARCHAR2(256) NULL,
+"COLUMN_NAME" VARCHAR2(767) NOT NULL,
+TYPE_NAME CLOB NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY 
(CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+PART_ID NUMBER NOT NULL,
+PART_KEY_VAL VARCHAR2(256) NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY 
KEY (PART_ID,INTEGER_IDX);
+
+CREATE TABLE CTLGS (
+CTLG_ID NUMBER PRIMARY KEY,
+"NAME" VARCHAR2(256),
+"DESC" VARCHAR2(4000),
+LOCATION_URI VARCHAR2(4000) NOT NULL,
+UNIQUE ("NAME")
+);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+DB_ID NUMBER NOT NULL,
+"DESC" VARCHAR2(4000) NULL,
+DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+OWNER_TYPE VARCHAR2(10) NULL,
+CTLG_NAME VARCHAR2(256)
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+PART_ID NUMBER NOT NULL,
+PARAM_KEY VARCHAR2(256) NOT NULL,
+PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY 
(PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+SERDE_ID NUMBER NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+SLIB VARCHAR2(4000) NULL,
+"DESCRIPTION" VARCHAR2(4000),
+"SERIALIZER_CLASS" VARCHAR2(4000),
+"DESERIALIZER_CLASS" VARCHAR2(4000),
+"SERDE_TYPE" NUMBER
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+TYPES_ID NUMBER NOT NULL,
+TYPE_NAME VARCHAR2(128) NULL,
+TYPE1 VARCHAR2(767) NULL,
+TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+TBL_ID NUMBER NOT NULL,
+PKEY_COMMENT VARCHAR2(4000) NULL,
+PKEY_NAME VARCHAR2(128) NOT NULL,
+PKEY_TYPE VARCHAR2(767) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE 
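
To make the SEQUENCE_TABLE comment near the top of this script concrete:
DataNucleus allocates datastore ids by bumping NEXT_VAL in a class's row,
conceptually along these lines (an illustrative sketch, not code from the
patch; real allocation block sizes vary):

UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 1
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog';
SELECT NEXT_VAL FROM SEQUENCE_TABLE
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog';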

[4/4] hive git commit: HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 and 4.0 (Alan Gates, reviewed by Vihang Karajgaonkar)

2018-05-29 Thread gates
HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 and 4.0 
(Alan Gates, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0cb2a6cd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0cb2a6cd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0cb2a6cd

Branch: refs/heads/master
Commit: 0cb2a6cd5e097738432ad4ed79f7927cf5a88e6d
Parents: b55b521
Author: Alan Gates 
Authored: Tue May 29 16:51:44 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 16:51:44 2018 -0700

--
 itests/hive-unit/pom.xml|2 +-
 packaging/src/main/assembly/bin.xml |3 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |  692 +++
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |6 +
 .../src/main/sql/derby/upgrade.order.derby  |1 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |4 +-
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  | 1252 
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |2 +-
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |6 +
 .../src/main/sql/mssql/upgrade.order.mssql  |1 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |2 +-
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  | 1190 
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |6 +
 .../src/main/sql/mysql/upgrade.order.mysql  |1 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql |2 +-
 .../sql/oracle/hive-schema-4.0.0.oracle.sql | 1147 +++
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql|3 +-
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql|6 +
 .../src/main/sql/oracle/upgrade.order.oracle|1 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |4 +-
 .../sql/postgres/hive-schema-4.0.0.postgres.sql | 1835 ++
 .../upgrade-3.0.0-to-3.1.0.postgres.sql |   13 +-
 .../upgrade-3.1.0-to-4.0.0.postgres.sql |6 +
 .../main/sql/postgres/upgrade.order.postgres|1 +
 24 files changed, 6169 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 933b493..49e9c60 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -567,7 +567,7 @@
 
   
 
-
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/packaging/src/main/assembly/bin.xml
--
diff --git a/packaging/src/main/assembly/bin.xml 
b/packaging/src/main/assembly/bin.xml
index 6b2d678..a9557cf 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -214,6 +214,9 @@
   
 **/*
   
+  
+**/upgrade.order.* 
+  
   scripts/metastore/upgrade
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
--
diff --git 
a/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql 
b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
new file mode 100644
index 000..24740f9
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -0,0 +1,692 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: 
/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: 
jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- --
+-- DDL Statements for functions
+-- --
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE 
JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN 
VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL 
DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- --
+-- DDL Statements for tables
+-- --
+CREATE TABLE "APP"."DBS" (
+  "DB_ID" BIGINT NOT NULL,
+  "DESC" VARCHAR(4000),
+  "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+  "NAME" VARCHAR(128),
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  "CTLG_NAME" VARCHAR(256) NOT 

[3/4] hive git commit: HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 and 4.0 (Alan Gates, reviewed by Vihang Karajgaonkar)

2018-05-29 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
--
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql 
b/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
index 5572c26..4643b0c 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
@@ -9,3 +9,4 @@
 2.2.0-to-2.3.0
 2.3.0-to-3.0.0
 3.0.0-to-3.1.0
+3.1.0-to-4.0.0
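
upgrade.order.mssql is the file schematool consults to chain upgrade scripts,
so with this entry an upgrade from a 3.0.0 metastore runs
upgrade-3.0.0-to-3.1.0.mssql.sql followed by upgrade-3.1.0-to-4.0.0.mssql.sql.
By convention each script's last statement bumps the recorded version, roughly
(a sketch of the conventional final statement, not quoted from the patch):

UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' WHERE VER_ID=1;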

http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql 
b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
index 1cca25a..1f04503 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
@@ -1174,7 +1174,7 @@ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON 
RUNTIME_STATS(CREATE_TIME);
 -- -
 -- Record schema version. Should be the last step in the init script
 -- -
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, 
'3.0.0', 'Hive release version 3.0.0');
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, 
'3.1.0', 'Hive release version 3.1.0');
 
 /*!40101 SET character_set_client = @saved_cs_client */;
 /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;

http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql 
b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
new file mode 100644
index 000..f0d2fa1
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -0,0 +1,1190 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- --
+-- Server version  5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, 
FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin 
DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` 
(`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,

[1/4] hive git commit: HIVE-19323 Create metastore SQL install and upgrade scripts for 3.1 and 4.0 (Alan Gates, reviewed by Vihang Karajgaonkar)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/master b55b521c7 -> 0cb2a6cd5


http://git-wip-us.apache.org/repos/asf/hive/blob/0cb2a6cd/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
--
diff --git 
a/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql 
b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
new file mode 100644
index 000..5f93ae0
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -0,0 +1,1835 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+"SD_ID" bigint NOT NULL,
+"BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+"INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+"CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+"CD_ID" bigint NOT NULL,
+"COMMENT" character varying(4000),
+"COLUMN_NAME" character varying(767) NOT NULL,
+"TYPE_NAME" text,
+"INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+"DB_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(180) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+CREATE TABLE "CTLGS" (
+"CTLG_ID" BIGINT PRIMARY KEY,
+"NAME" VARCHAR(256) UNIQUE,
+"DESC" VARCHAR(4000),
+"LOCATION_URI" VARCHAR(4000) NOT NULL
+);
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+"DB_ID" bigint NOT NULL,
+"DESC" character varying(4000) DEFAULT NULL::character varying,
+"DB_LOCATION_URI" character varying(4000) NOT NULL,
+"NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+"CTLG_NAME" varchar(256)
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+"DB_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DB_ID" bigint,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+"USER_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+"INDEX_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DEFERRED_REBUILD" boolean NOT NULL,
+"INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character 
varying,
+"INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+"INDEX_TBL_ID" bigint,
+"LAST_ACCESS_TIME" bigint NOT NULL,
+"ORIG_TBL_ID" bigint,
+"SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+"INDEX_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(256) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+"CLASS_NAME" 

[5/6] hive git commit: HIVE-19308: Provide an Arrow stream reader for external LLAP clients (Eric Wohlstadter, reviewed by Jason Dere)

2018-05-29 Thread vgarg
http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowWrapperWritable.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowWrapperWritable.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowWrapperWritable.java
index df7b53f..dd490b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowWrapperWritable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowWrapperWritable.java
@@ -15,26 +15,32 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.hive.ql.io.arrow;
 
 import org.apache.arrow.vector.VectorSchemaRoot;
-import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
-public class ArrowWrapperWritable implements Writable {
+public class ArrowWrapperWritable implements WritableComparable {
   private VectorSchemaRoot vectorSchemaRoot;
 
   public ArrowWrapperWritable(VectorSchemaRoot vectorSchemaRoot) {
 this.vectorSchemaRoot = vectorSchemaRoot;
   }
+  public ArrowWrapperWritable() {}
 
   public VectorSchemaRoot getVectorSchemaRoot() {
 return vectorSchemaRoot;
   }
 
+  public void setVectorSchemaRoot(VectorSchemaRoot vectorSchemaRoot) {
+this.vectorSchemaRoot = vectorSchemaRoot;
+  }
+
   @Override
   public void write(DataOutput dataOutput) throws IOException {
 throw new UnsupportedOperationException();
@@ -44,4 +50,12 @@ public class ArrowWrapperWritable implements Writable {
   public void readFields(DataInput dataInput) throws IOException {
 throw new UnsupportedOperationException();
   }
+
+  @Override public int compareTo(Object o) {
+return 0;
+  }
+
+  @Override public boolean equals(Object o) {
+return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/RootAllocatorFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/RootAllocatorFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/RootAllocatorFactory.java
index 78cc188..7aa732b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/RootAllocatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/RootAllocatorFactory.java
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.hive.ql.io.arrow;
 
 import org.apache.arrow.memory.RootAllocator;
@@ -41,4 +42,12 @@ public enum RootAllocatorFactory {
 }
 return rootAllocator;
   }
+
+  //arrowAllocatorLimit is ignored if an allocator was previously created
+  public synchronized RootAllocator getOrCreateRootAllocator(long 
arrowAllocatorLimit) {
+if (rootAllocator == null) {
+  rootAllocator = new RootAllocator(arrowAllocatorLimit);
+}
+return rootAllocator;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/ql/src/test/org/apache/hadoop/hive/llap/TestLlapOutputFormat.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/llap/TestLlapOutputFormat.java 
b/ql/src/test/org/apache/hadoop/hive/llap/TestLlapOutputFormat.java
index 13a3070..f27cdf4 100644
--- a/ql/src/test/org/apache/hadoop/hive/llap/TestLlapOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/llap/TestLlapOutputFormat.java
@@ -54,6 +54,7 @@ public class TestLlapOutputFormat {
 Configuration conf = new Configuration();
 // Pick random avail port
 HiveConf.setIntVar(conf, 
HiveConf.ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT, 0);
+HiveConf.setBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW, 
false);
 LlapOutputFormatService.initializeAndStart(conf, null);
 service = LlapOutputFormatService.get();
 LlapProxy.setDaemon(true);



[1/6] hive git commit: HIVE-19306: Arrow batch serializer (Teddy Choi, reviewed by Matt McCline and Eric Wohlstadter (non-binding))

2018-05-29 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 7156df66f -> 2334a0ddf


HIVE-19306: Arrow batch serializer (Teddy Choi, reviewed by Matt McCline and 
Eric Wohlstadter (non-binding))


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0e090e58
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0e090e58
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0e090e58

Branch: refs/heads/branch-3
Commit: 0e090e58772516070e472713422aa8566df81b50
Parents: 7156df6
Author: Matt McCline 
Authored: Thu May 10 16:42:50 2018 -0500
Committer: Vineet Garg 
Committed: Tue May 29 13:56:07 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |5 +
 .../ql/io/arrow/ArrowColumnarBatchSerDe.java| 1179 ++
 .../hive/ql/io/arrow/ArrowWrapperWritable.java  |   47 +
 .../hive/ql/io/arrow/RootAllocatorFactory.java  |   44 +
 .../io/arrow/TestArrowColumnarBatchSerDe.java   |  815 
 serde/pom.xml   |5 +
 6 files changed, 2095 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0e090e58/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 60d5f04..128e892 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2625,6 +2625,11 @@ public class HiveConf extends Configuration {
   "Set to true to ensure that each SQL Merge statement ensures that for 
each row in the target\n" +
 "table there is at most 1 matching row in the source table per SQL 
Specification."),
 
+// For Arrow SerDe
+HIVE_ARROW_ROOT_ALLOCATOR_LIMIT("hive.arrow.root.allocator.limit", 
Long.MAX_VALUE,
+"Arrow root allocator memory size limitation in bytes."),
+HIVE_ARROW_BATCH_SIZE("hive.arrow.batch.size", 1000, "The number of rows 
sent in one Arrow batch."),
+
 // For Druid storage handler
 HIVE_DRUID_INDEXING_GRANULARITY("hive.druid.indexer.segments.granularity", 
"DAY",
 new PatternSet("YEAR", "MONTH", "WEEK", "DAY", "HOUR", "MINUTE", 
"SECOND"),

http://git-wip-us.apache.org/repos/asf/hive/blob/0e090e58/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
new file mode 100644
index 000..330fa58
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
@@ -0,0 +1,1179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.arrow;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import io.netty.buffer.ArrowBuf;
+import org.apache.arrow.memory.BufferAllocator;
+import org.apache.arrow.vector.FieldVector;
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.complex.impl.UnionListWriter;
+import org.apache.arrow.vector.complex.impl.UnionReader;
+import org.apache.arrow.vector.complex.impl.UnionWriter;
+import org.apache.arrow.vector.complex.reader.FieldReader;
+import org.apache.arrow.vector.complex.writer.BaseWriter;
+import org.apache.arrow.vector.complex.writer.BigIntWriter;
+import org.apache.arrow.vector.complex.writer.BitWriter;
+import org.apache.arrow.vector.complex.writer.DateDayWriter;
+import org.apache.arrow.vector.complex.writer.DecimalWriter;
+import org.apache.arrow.vector.complex.writer.FieldWriter;
+import org.apache.arrow.vector.complex.writer.Float4Writer;
+import org.apache.arrow.vector.complex.writer.Float8Writer;
+import org.apache.arrow.vector.complex.writer.IntWriter;
+import org.apache.arrow.vector.complex.writer.IntervalDayWriter;
+import 

[2/6] hive git commit: HIVE-19495: Arrow SerDe itest failure (Teddy Choi, reviewed by Matt McCline)

2018-05-29 Thread vgarg
http://git-wip-us.apache.org/repos/asf/hive/blob/2726f302/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
new file mode 100644
index 000..bd23011
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
@@ -0,0 +1,537 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.arrow;
+
+import io.netty.buffer.ArrowBuf;
+import org.apache.arrow.vector.BigIntVector;
+import org.apache.arrow.vector.BitVector;
+import org.apache.arrow.vector.BitVectorHelper;
+import org.apache.arrow.vector.DateDayVector;
+import org.apache.arrow.vector.DecimalVector;
+import org.apache.arrow.vector.FieldVector;
+import org.apache.arrow.vector.Float4Vector;
+import org.apache.arrow.vector.Float8Vector;
+import org.apache.arrow.vector.IntVector;
+import org.apache.arrow.vector.IntervalDayVector;
+import org.apache.arrow.vector.IntervalYearVector;
+import org.apache.arrow.vector.SmallIntVector;
+import org.apache.arrow.vector.TimeStampNanoVector;
+import org.apache.arrow.vector.TinyIntVector;
+import org.apache.arrow.vector.VarBinaryVector;
+import org.apache.arrow.vector.VarCharVector;
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.complex.ListVector;
+import org.apache.arrow.vector.complex.MapVector;
+import org.apache.arrow.vector.complex.NullableMapVector;
+import org.apache.arrow.vector.types.Types;
+import org.apache.arrow.vector.types.pojo.ArrowType;
+import org.apache.arrow.vector.types.pojo.FieldType;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAssignRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_ARROW_BATCH_SIZE;
+import static 
org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil.createColumnVector;
+import static 
org.apache.hadoop.hive.ql.io.arrow.ArrowColumnarBatchSerDe.MS_PER_SECOND;
+import static 
org.apache.hadoop.hive.ql.io.arrow.ArrowColumnarBatchSerDe.NS_PER_MS;
+import static 
org.apache.hadoop.hive.ql.io.arrow.ArrowColumnarBatchSerDe.SECOND_PER_DAY;
+import static 
org.apache.hadoop.hive.ql.io.arrow.ArrowColumnarBatchSerDe.toStructListTypeInfo;
+import static 
org.apache.hadoop.hive.ql.io.arrow.ArrowColumnarBatchSerDe.toStructListVector;
+import static 

[3/6] hive git commit: HIVE-19495: Arrow SerDe itest failure (Teddy Choi, reviewed by Matt McCline)

2018-05-29 Thread vgarg
HIVE-19495: Arrow SerDe itest failure (Teddy Choi, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2726f302
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2726f302
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2726f302

Branch: refs/heads/branch-3
Commit: 2726f3028c8963b5a5ae2f8a3bd49f5ae03767a5
Parents: 0e090e5
Author: Teddy Choi 
Authored: Tue May 15 20:44:21 2018 -0500
Committer: Vineet Garg 
Committed: Tue May 29 13:58:01 2018 -0700

--
 .../ql/io/arrow/ArrowColumnarBatchSerDe.java| 990 +--
 .../hadoop/hive/ql/io/arrow/Deserializer.java   | 423 
 .../hadoop/hive/ql/io/arrow/Serializer.java | 537 ++
 .../io/arrow/TestArrowColumnarBatchSerDe.java   | 208 ++--
 4 files changed, 1087 insertions(+), 1071 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2726f302/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
index 330fa58..b093ebb 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/ArrowColumnarBatchSerDe.java
@@ -18,78 +18,26 @@
 package org.apache.hadoop.hive.ql.io.arrow;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import io.netty.buffer.ArrowBuf;
 import org.apache.arrow.memory.BufferAllocator;
-import org.apache.arrow.vector.FieldVector;
-import org.apache.arrow.vector.VectorSchemaRoot;
 import org.apache.arrow.vector.complex.impl.UnionListWriter;
-import org.apache.arrow.vector.complex.impl.UnionReader;
-import org.apache.arrow.vector.complex.impl.UnionWriter;
-import org.apache.arrow.vector.complex.reader.FieldReader;
 import org.apache.arrow.vector.complex.writer.BaseWriter;
-import org.apache.arrow.vector.complex.writer.BigIntWriter;
-import org.apache.arrow.vector.complex.writer.BitWriter;
-import org.apache.arrow.vector.complex.writer.DateDayWriter;
-import org.apache.arrow.vector.complex.writer.DecimalWriter;
-import org.apache.arrow.vector.complex.writer.FieldWriter;
-import org.apache.arrow.vector.complex.writer.Float4Writer;
-import org.apache.arrow.vector.complex.writer.Float8Writer;
-import org.apache.arrow.vector.complex.writer.IntWriter;
-import org.apache.arrow.vector.complex.writer.IntervalDayWriter;
-import org.apache.arrow.vector.complex.writer.IntervalYearWriter;
-import org.apache.arrow.vector.complex.writer.SmallIntWriter;
-import org.apache.arrow.vector.complex.writer.TimeStampMilliWriter;
-import org.apache.arrow.vector.complex.writer.TinyIntWriter;
-import org.apache.arrow.vector.complex.writer.VarBinaryWriter;
-import org.apache.arrow.vector.complex.writer.VarCharWriter;
-import org.apache.arrow.vector.holders.NullableBigIntHolder;
-import org.apache.arrow.vector.holders.NullableBitHolder;
-import org.apache.arrow.vector.holders.NullableDateDayHolder;
-import org.apache.arrow.vector.holders.NullableFloat4Holder;
-import org.apache.arrow.vector.holders.NullableFloat8Holder;
-import org.apache.arrow.vector.holders.NullableIntHolder;
-import org.apache.arrow.vector.holders.NullableIntervalDayHolder;
-import org.apache.arrow.vector.holders.NullableIntervalYearHolder;
-import org.apache.arrow.vector.holders.NullableSmallIntHolder;
-import org.apache.arrow.vector.holders.NullableTimeStampMilliHolder;
-import org.apache.arrow.vector.holders.NullableTinyIntHolder;
-import org.apache.arrow.vector.holders.NullableVarBinaryHolder;
-import org.apache.arrow.vector.holders.NullableVarCharHolder;
 import org.apache.arrow.vector.types.TimeUnit;
-import org.apache.arrow.vector.types.Types;
+import org.apache.arrow.vector.types.Types.MinorType;
 import org.apache.arrow.vector.types.pojo.ArrowType;
 import org.apache.arrow.vector.types.pojo.Field;
 import org.apache.arrow.vector.types.pojo.FieldType;
-import org.apache.arrow.vector.types.pojo.Schema;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
-import 

[6/6] hive git commit: HIVE-19308: Provide an Arrow stream reader for external LLAP clients (Eric Wohlstadter, reviewed by Jason Dere)

2018-05-29 Thread vgarg
HIVE-19308: Provide an Arrow stream reader for external LLAP clients (Eric 
Wohlstadter, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2334a0dd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2334a0dd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2334a0dd

Branch: refs/heads/branch-3
Commit: 2334a0ddfbd1a96d5fa5891a51be57f6cf408789
Parents: f7f90a0
Author: Jason Dere 
Authored: Mon May 21 13:47:43 2018 -0700
Committer: Vineet Garg 
Committed: Tue May 29 13:58:34 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 .../hive/jdbc/AbstractJdbcTriggersTest.java |   4 +-
 .../apache/hive/jdbc/BaseJdbcWithMiniLlap.java  | 615 +++
 .../apache/hive/jdbc/TestJdbcWithMiniLlap.java  | 603 --
 .../hive/jdbc/TestJdbcWithMiniLlapArrow.java| 230 +++
 .../hive/jdbc/TestJdbcWithMiniLlapRow.java  |  45 ++
 .../hadoop/hive/llap/LlapBaseRecordReader.java  | 101 ++-
 .../hadoop/hive/llap/LlapRowRecordReader.java   |  26 +-
 llap-ext-client/pom.xml |   5 +
 .../hive/llap/LlapArrowBatchRecordReader.java   |  82 +++
 .../hive/llap/LlapArrowRowInputFormat.java  |  53 ++
 .../hive/llap/LlapArrowRowRecordReader.java | 107 
 .../hadoop/hive/llap/LlapBaseInputFormat.java   |  27 +-
 pom.xml |   1 +
 .../hive/ql/io/arrow/ArrowWrapperWritable.java  |  18 +-
 .../hive/ql/io/arrow/RootAllocatorFactory.java  |   9 +
 .../hadoop/hive/llap/TestLlapOutputFormat.java  |   1 +
 17 files changed, 1254 insertions(+), 675 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8780374..8347f7f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4161,7 +4161,7 @@ public class HiveConf extends Configuration {
 Constants.LLAP_LOGGER_NAME_RFA,
 Constants.LLAP_LOGGER_NAME_CONSOLE),
 "logger used for llap-daemons."),
-LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", false,
+LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", true,
   "Whether LLapOutputFormatService should output arrow batches"),
 
 HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", 
"500ms",

http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java
index 17e44bb..7d5172b 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/AbstractJdbcTriggersTest.java
@@ -90,7 +90,7 @@ public abstract class AbstractJdbcTriggersTest {
 
   @Before
   public void setUp() throws Exception {
-hs2Conn = TestJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(), 
System.getProperty("user.name"), "bar");
+hs2Conn = BaseJdbcWithMiniLlap.getConnection(miniHS2.getJdbcURL(), 
System.getProperty("user.name"), "bar");
   }
 
   @After
@@ -124,7 +124,7 @@ public abstract class AbstractJdbcTriggersTest {
 throws Exception {
 
 Connection con = hs2Conn;
-TestJdbcWithMiniLlap.createTestTable(con, null, tableName, 
kvDataFilePath.toString());
+BaseJdbcWithMiniLlap.createTestTable(con, null, tableName, 
kvDataFilePath.toString());
 createSleepUDF();
 
 final ByteArrayOutputStream baos = new ByteArrayOutputStream();

http://git-wip-us.apache.org/repos/asf/hive/blob/2334a0dd/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
new file mode 100644
index 000..11017f6
--- /dev/null
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
@@ -0,0 +1,615 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache 

[4/6] hive git commit: HIVE-19307: Support ArrowOutputStream in LlapOutputFormatService (Eric Wohlstadter, reviewed by Jason Dere)

2018-05-29 Thread vgarg
HIVE-19307: Support ArrowOutputStream in LlapOutputFormatService (Eric 
Wohlstadter, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f7f90a04
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f7f90a04
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f7f90a04

Branch: refs/heads/branch-3
Commit: f7f90a044499739a2bd6a3ea543f70cb59e3f870
Parents: 2726f30
Author: Jason Dere 
Authored: Tue May 15 14:25:40 2018 -0700
Committer: Vineet Garg 
Committed: Tue May 29 13:58:16 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   3 +
 .../hadoop/hive/llap/LlapArrowRecordWriter.java |  70 +++
 .../hive/llap/LlapOutputFormatService.java  |  11 +-
 .../hive/llap/WritableByteChannelAdapter.java   | 125 +++
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  26 ++--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  28 +++--
 .../hadoop/hive/ql/plan/FileSinkDesc.java   |  12 +-
 7 files changed, 251 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f7f90a04/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 128e892..8780374 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -397,6 +397,7 @@ public class HiveConf extends Configuration {
 llapDaemonVarsSetLocal.add(ConfVars.LLAP_VALIDATE_ACLS.varname);
 llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_LOGGER.varname);
 llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_USE_FQDN.varname);
+llapDaemonVarsSetLocal.add(ConfVars.LLAP_OUTPUT_FORMAT_ARROW.varname);
   }
 
   /**
@@ -4160,6 +4161,8 @@ public class HiveConf extends Configuration {
 Constants.LLAP_LOGGER_NAME_RFA,
 Constants.LLAP_LOGGER_NAME_CONSOLE),
 "logger used for llap-daemons."),
+LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", false,
+  "Whether LLapOutputFormatService should output arrow batches"),
 
 HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", 
"500ms",
   new TimeValidator(TimeUnit.MILLISECONDS),

http://git-wip-us.apache.org/repos/asf/hive/blob/f7f90a04/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java 
b/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
new file mode 100644
index 000..1b3a3eb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap;
+
+import java.io.IOException;
+
+import org.apache.arrow.vector.VectorSchemaRoot;
+import org.apache.arrow.vector.ipc.ArrowStreamWriter;
+import org.apache.hadoop.hive.ql.io.arrow.ArrowWrapperWritable;
+import org.apache.hadoop.io.Writable;
+import java.nio.channels.WritableByteChannel;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Writes Arrow batches to an {@link 
org.apache.arrow.vector.ipc.ArrowStreamWriter}.
+ * The byte stream will be formatted according to the Arrow Streaming format.
+ * Because ArrowStreamWriter is bound to a {@link 
org.apache.arrow.vector.VectorSchemaRoot}
+ * when it is created,
+ * calls to the {@link #write(Writable, Writable)} method only serve as a 
signal that
+ * a new batch has been loaded to the associated VectorSchemaRoot.
+ * Payload data for writing is indirectly made available by reference:
+ * ArrowStreamWriter -> VectorSchemaRoot -> List<FieldVector>
+ * i.e. both the key and value are ignored once a reference to the 

hive git commit: HIVE-19704 : LLAP IO retries on branch-2 should be stoppable (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

2018-05-29 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2 da84a1d39 -> d988d4aef


HIVE-19704 : LLAP IO retries on branch-2 should be stoppable (Sergey Shelukhin, 
reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d988d4ae
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d988d4ae
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d988d4ae

Branch: refs/heads/branch-2
Commit: d988d4aef6405b18652cf1b7304f616894c72a8e
Parents: da84a1d
Author: sergey 
Authored: Tue May 29 13:28:42 2018 -0700
Committer: sergey 
Committed: Tue May 29 13:28:42 2018 -0700

--
 .../hadoop/hive/llap/cache/BuddyAllocator.java  | 14 ++--
 .../llap/cache/LowLevelCacheMemoryManager.java  | 23 ++---
 .../hadoop/hive/llap/cache/MemoryManager.java   |  4 ++-
 .../llap/io/encoded/OrcEncodedDataReader.java   | 17 +
 .../llap/io/encoded/SerDeEncodedDataReader.java | 36 +++-
 .../hive/llap/io/metadata/OrcMetadataCache.java | 14 
 .../hive/llap/cache/TestBuddyAllocator.java |  3 +-
 .../llap/cache/TestLowLevelLrfuCachePolicy.java | 12 +++
 .../hive/llap/cache/TestOrcMetadataCache.java   | 16 +
 .../hive/ql/io/orc/encoded/EncodedReader.java   |  3 ++
 .../ql/io/orc/encoded/EncodedReaderImpl.java| 28 ---
 .../ql/io/orc/encoded/StoppableAllocator.java   | 29 
 12 files changed, 150 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d988d4ae/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 302918a..af9243a 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.llap.cache;
 
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -44,9 +45,10 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
+import org.apache.hadoop.hive.ql.io.orc.encoded.StoppableAllocator;
 
 public final class BuddyAllocator
-  implements EvictionAwareAllocator, BuddyAllocatorMXBean, LlapOomDebugDump {
+  implements EvictionAwareAllocator, StoppableAllocator, BuddyAllocatorMXBean, 
LlapOomDebugDump {
   private final Arena[] arenas;
   private final AtomicInteger allocatedArenas = new AtomicInteger(0);
 
@@ -183,10 +185,16 @@ public final class BuddyAllocator
 metrics.incrAllocatedArena();
   }
 
-  // TODO: would it make sense to return buffers asynchronously?
+
   @Override
   public void allocateMultiple(MemoryBuffer[] dest, int size)
   throws AllocatorOutOfMemoryException {
+allocateMultiple(dest, size, null);
+  }
+
+  @Override
+  public void allocateMultiple(MemoryBuffer[] dest, int size, AtomicBoolean 
isStopped)
+  throws AllocatorOutOfMemoryException {
 assert size > 0 : "size is " + size;
 if (size > maxAllocation) {
   throw new RuntimeException("Trying to allocate " + size + "; max is " + 
maxAllocation);
@@ -197,7 +205,7 @@ public final class BuddyAllocator
 int allocLog2 = freeListIx + minAllocLog2;
 int allocationSize = 1 << allocLog2;
 // TODO: reserving the entire thing is not ideal before we alloc anything. 
Interleave?
-memoryManager.reserveMemory(dest.length << allocLog2);
+memoryManager.reserveMemory(dest.length << allocLog2, isStopped);
 int destAllocIx = 0;
 for (int i = 0; i < dest.length; ++i) {
   if (dest[i] != null) continue;

http://git-wip-us.apache.org/repos/asf/hive/blob/d988d4ae/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
index e331f1b..e30acb0 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheMemoryManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.llap.cache;
 
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 

hive git commit: HIVE-19666 : SQL standard auth for create fn may make an impossible privilege check (branch-2) (Sergey Shelukhin, reviewed by Thejas M Nair)

2018-05-29 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-2 977ea4559 -> da84a1d39


HIVE-19666 : SQL standard auth for create fn may make an impossible privilege 
check (branch-2) (Sergey Shelukhin, reviewed by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/da84a1d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/da84a1d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/da84a1d3

Branch: refs/heads/branch-2
Commit: da84a1d39d657d9c1a99ab524b4791740c77d02f
Parents: 977ea45
Author: sergey 
Authored: Tue May 29 13:10:05 2018 -0700
Committer: sergey 
Committed: Tue May 29 13:10:05 2018 -0700

--
 .../sqlstd/SQLStdHiveAuthorizationValidator.java  |  4 
 .../clientpositive/authorization_create_func1.q   |  2 ++
 .../authorization_create_func1.q.out  | 18 ++
 3 files changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/da84a1d3/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
index 2977675..0dac476 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java
@@ -113,6 +113,10 @@ public class SQLStdHiveAuthorizationValidator implements 
HiveAuthorizationValida
   case DFS_URI:
 availPrivs = SQLAuthorizationUtils.getPrivilegesFromFS(new 
Path(hiveObj.getObjectName()),
 conf, userName);
+// For operations like create fn, we require admin privilege from the 
FS but never get it.
+if (privController.isUserAdmin()) {
+  availPrivs.addPrivilege(SQLPrivTypeGrant.ADMIN_PRIV);
+}
 break;
   case PARTITION:
 // sql std authorization is managing privileges at the table/view 
levels
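The comment in the hunk above states the whole problem: SQLAuthorizationUtils.getPrivilegesFromFS derives privileges from filesystem permissions, and ADMIN_PRIV can never come out of that check, so requiring it for create function on a DFS URI was unsatisfiable. The fix tops up the available set for admins before the comparison; the accompanying test below adds create function ... using jar because only a jar-backed function reaches the URI check. A hedged sketch of the fix's shape (the enum and method are stand-ins, not Hive's SQLPrivTypeGrant API):

import java.util.EnumSet;
import java.util.Set;

public class AdminPrivPatch {
  enum Priv { SELECT, INSERT, OWNER_PRIV, ADMIN_PRIV }

  // Privileges derivable from an FS check, plus the explicit admin top-up.
  static Set<Priv> availableForUri(boolean userIsAdmin) {
    Set<Priv> avail = EnumSet.of(Priv.OWNER_PRIV); // FS yields at most ownership
    if (userIsAdmin) {
      avail.add(Priv.ADMIN_PRIV); // never derivable from FS permissions alone
    }
    return avail;
  }

  public static void main(String[] args) {
    // create function requires ADMIN_PRIV on the jar's DFS URI:
    System.out.println(availableForUri(true).contains(Priv.ADMIN_PRIV));  // true
    System.out.println(availableForUri(false).contains(Priv.ADMIN_PRIV)); // false
  }
}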

http://git-wip-us.apache.org/repos/asf/hive/blob/da84a1d3/ql/src/test/queries/clientpositive/authorization_create_func1.q
--
diff --git a/ql/src/test/queries/clientpositive/authorization_create_func1.q 
b/ql/src/test/queries/clientpositive/authorization_create_func1.q
index 6c7ebc7..f9f77ce 100644
--- a/ql/src/test/queries/clientpositive/authorization_create_func1.q
+++ b/ql/src/test/queries/clientpositive/authorization_create_func1.q
@@ -9,6 +9,8 @@ set role ADMIN;
 
 create temporary function temp_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
 create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+create function perm_fn_using as 'IdentityStringUDF' using jar 
'../../data/files/identity_udf.jar';
 
 drop temporary function temp_fn;
 drop function perm_fn;
+drop function perm_fn_using;

http://git-wip-us.apache.org/repos/asf/hive/blob/da84a1d3/ql/src/test/results/clientpositive/authorization_create_func1.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/authorization_create_func1.q.out 
b/ql/src/test/results/clientpositive/authorization_create_func1.q.out
index d7de21a..3e60e4d 100644
--- a/ql/src/test/results/clientpositive/authorization_create_func1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_create_func1.q.out
@@ -16,6 +16,16 @@ POSTHOOK: query: create function perm_fn as 
'org.apache.hadoop.hive.ql.udf.UDFAs
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default.perm_fn
+PREHOOK: query: create function perm_fn_using as 'IdentityStringUDF' using jar 
'../../data/files/identity_udf.jar'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: database:default
+PREHOOK: Output: default.perm_fn_using
+#### A masked pattern was here ####
+POSTHOOK: query: create function perm_fn_using as 'IdentityStringUDF' using 
jar '../../data/files/identity_udf.jar'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default.perm_fn_using
+#### A masked pattern was here ####
 PREHOOK: query: drop temporary function temp_fn
 PREHOOK: type: DROPFUNCTION
 PREHOOK: Output: temp_fn
@@ -30,3 +40,11 @@ POSTHOOK: query: drop function perm_fn
 POSTHOOK: type: DROPFUNCTION
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default.perm_fn
+PREHOOK: query: drop function perm_fn_using
+PREHOOK: type: DROPFUNCTION
+PREHOOK: Output: database:default
+PREHOOK: Output: default.perm_fn_using
+POSTHOOK: query: drop function perm_fn_using
+POSTHOOK: type: DROPFUNCTION
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default.perm_fn_using

hive git commit: HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3.0 2073cdebe -> 84f18e4ea


HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan 
Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/84f18e4e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/84f18e4e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/84f18e4e

Branch: refs/heads/branch-3.0
Commit: 84f18e4eabdc33812e320e3440912425f45f5423
Parents: 2073cde
Author: Alan Gates 
Authored: Tue May 29 12:21:32 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 12:31:09 2018 -0700

--
 .../authorization/AuthorizationPreEventListener.java |  4 +++-
 .../apache/hadoop/hive/metastore/IHMSHandler.java| 15 ---
 2 files changed, 3 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/84f18e4e/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
index 16efb72..2cc057e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
@@ -464,8 +464,10 @@ public class AuthorizationPreEventListener extends 
MetaStorePreEventListener {
 public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition 
mapiPart,
 PreEventContext context) throws HiveException, NoSuchObjectException, 
MetaException {
   org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = 
mapiPart.deepCopy();
+  String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
+  MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
   org.apache.hadoop.hive.metastore.api.Table t = 
context.getHandler().get_table_core(
-  mapiPart.getDbName(), mapiPart.getTableName());
+  catName, mapiPart.getDbName(), mapiPart.getTableName());
   if (wrapperApiPart.getSd() == null){
 // In the cases of create partition, by the time this event fires, the 
partition
 // object has not yet come into existence, and thus will not yet have a

http://git-wip-us.apache.org/repos/asf/hive/blob/84f18e4e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 1a81dc9..29c98d1 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -91,21 +91,6 @@ public interface IHMSHandler extends 
ThriftHiveMetastore.Iface, Configurable {
   throws MetaException, NoSuchObjectException;
 
   /**
-   * Equivalent of get_table, but does not log audits and fire pre-event 
listener.
-   * Meant to be used for calls made by other hive classes, that are not using 
the
-   * thrift interface.  Uses the configured catalog.
-   * @param dbName database name
-   * @param name table name
-   * @return Table object
-   * @throws NoSuchObjectException If the table does not exist.
-   * @throws MetaException  If another error occurs.
-   */
-  default Table get_table_core(final String dbName, final String name)
-  throws MetaException, NoSuchObjectException {
-return get_table_core(MetaStoreUtils.getDefaultCatalog(getConf()), dbName, 
name);
-  }
-
-  /**
* Get a list of all transactional listeners.
* @return list of listeners.
*/
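The deleted default overload pinned every lookup to the configured default catalog, so a Partition that carried its own catalog name could resolve against the wrong catalog's table; the listener change above derives the catalog from the object first and passes it to get_table_core explicitly. A minimal sketch of that resolution idiom (plain strings are a simplification of the real API):

public class CatalogResolution {
  // Prefer the catalog carried by the object; otherwise fall back to the
  // configured default, and always pass the result explicitly.
  static String resolveCatalog(String objectCatalog, String configuredDefault) {
    return objectCatalog != null ? objectCatalog : configuredDefault;
  }

  public static void main(String[] args) {
    System.out.println(resolveCatalog(null, "hive"));    // hive (fallback)
    System.out.println(resolveCatalog("spark", "hive")); // spark (explicit wins)
  }
}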



hive git commit: HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3 80eafb4fa -> 7156df66f


HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan 
Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7156df66
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7156df66
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7156df66

Branch: refs/heads/branch-3
Commit: 7156df66fcf05549c2c0c7f5cbfd6539cf035f16
Parents: 80eafb4
Author: Alan Gates 
Authored: Tue May 29 12:21:32 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 12:23:13 2018 -0700

--
 .../authorization/AuthorizationPreEventListener.java |  4 +++-
 .../apache/hadoop/hive/metastore/IHMSHandler.java| 15 ---
 2 files changed, 3 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7156df66/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
index 16efb72..2cc057e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
@@ -464,8 +464,10 @@ public class AuthorizationPreEventListener extends 
MetaStorePreEventListener {
 public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition 
mapiPart,
 PreEventContext context) throws HiveException, NoSuchObjectException, 
MetaException {
   org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = 
mapiPart.deepCopy();
+  String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
+  MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
   org.apache.hadoop.hive.metastore.api.Table t = 
context.getHandler().get_table_core(
-  mapiPart.getDbName(), mapiPart.getTableName());
+  catName, mapiPart.getDbName(), mapiPart.getTableName());
   if (wrapperApiPart.getSd() == null){
 // In the cases of create partition, by the time this event fires, the 
partition
 // object has not yet come into existence, and thus will not yet have a

http://git-wip-us.apache.org/repos/asf/hive/blob/7156df66/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 1a81dc9..29c98d1 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -91,21 +91,6 @@ public interface IHMSHandler extends 
ThriftHiveMetastore.Iface, Configurable {
   throws MetaException, NoSuchObjectException;
 
   /**
-   * Equivalent of get_table, but does not log audits and fire pre-event 
listener.
-   * Meant to be used for calls made by other hive classes, that are not using 
the
-   * thrift interface.  Uses the configured catalog.
-   * @param dbName database name
-   * @param name table name
-   * @return Table object
-   * @throws NoSuchObjectException If the table does not exist.
-   * @throws MetaException  If another error occurs.
-   */
-  default Table get_table_core(final String dbName, final String name)
-  throws MetaException, NoSuchObjectException {
-return get_table_core(MetaStoreUtils.getDefaultCatalog(getConf()), dbName, 
name);
-  }
-
-  /**
* Get a list of all transactional listeners.
* @return list of listeners.
*/



hive git commit: HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/master 83afdb4d5 -> b55b521c7


HIVE-19576 IHMSHandler.getTable not always fetching the right catalog (Alan 
Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b55b521c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b55b521c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b55b521c

Branch: refs/heads/master
Commit: b55b521c7d0a3c7e0049fc5d5803647c31c28918
Parents: 83afdb4
Author: Alan Gates 
Authored: Tue May 29 12:21:32 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 12:21:32 2018 -0700

--
 .../authorization/AuthorizationPreEventListener.java |  4 +++-
 .../apache/hadoop/hive/metastore/IHMSHandler.java| 15 ---
 2 files changed, 3 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b55b521c/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
index 16efb72..2cc057e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
@@ -464,8 +464,10 @@ public class AuthorizationPreEventListener extends 
MetaStorePreEventListener {
 public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition 
mapiPart,
 PreEventContext context) throws HiveException, NoSuchObjectException, 
MetaException {
   org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = 
mapiPart.deepCopy();
+  String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
+  MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
   org.apache.hadoop.hive.metastore.api.Table t = 
context.getHandler().get_table_core(
-  mapiPart.getDbName(), mapiPart.getTableName());
+  catName, mapiPart.getDbName(), mapiPart.getTableName());
   if (wrapperApiPart.getSd() == null){
 // In the cases of create partition, by the time this event fires, the 
partition
 // object has not yet come into existence, and thus will not yet have a

http://git-wip-us.apache.org/repos/asf/hive/blob/b55b521c/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 1a81dc9..29c98d1 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -91,21 +91,6 @@ public interface IHMSHandler extends 
ThriftHiveMetastore.Iface, Configurable {
   throws MetaException, NoSuchObjectException;
 
   /**
-   * Equivalent of get_table, but does not log audits and fire pre-event 
listener.
-   * Meant to be used for calls made by other hive classes, that are not using 
the
-   * thrift interface.  Uses the configured catalog.
-   * @param dbName database name
-   * @param name table name
-   * @return Table object
-   * @throws NoSuchObjectException If the table does not exist.
-   * @throws MetaException  If another error occurs.
-   */
-  default Table get_table_core(final String dbName, final String name)
-  throws MetaException, NoSuchObjectException {
-return get_table_core(MetaStoreUtils.getDefaultCatalog(getConf()), dbName, 
name);
-  }
-
-  /**
* Get a list of all transactional listeners.
* @return list of listeners.
*/



[5/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index f9bd64b..3f18605 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -810,7 +810,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   PrivilegeBag privs = new PrivilegeBag();
   privs.addToPrivileges(new HiveObjectPrivilege( new 
HiveObjectRef(HiveObjectType.GLOBAL, null,
 null, null, null), ADMIN, PrincipalType.ROLE, new 
PrivilegeGrantInfo("All", 0, ADMIN,
-PrincipalType.ROLE, true)));
+  PrincipalType.ROLE, true), "SQL"));
   try {
 ms.grantPrivileges(privs);
   } catch (InvalidObjectException e) {
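The hunk above attributes the bootstrap "All"-to-ADMIN grant to a named authorizer ("SQL") instead of recording it anonymously, so the per-authorizer refreshes introduced elsewhere in this patch do not clobber it. A tiny illustration of the record shape, with Grant standing in for HiveObjectPrivilege plus PrivilegeGrantInfo:

public class BootstrapGrant {
  static final class Grant {
    final String principal;
    final String priv;
    final String authorizer;
    Grant(String principal, String priv, String authorizer) {
      this.principal = principal;
      this.priv = priv;
      this.authorizer = authorizer;
    }
  }

  public static void main(String[] args) {
    Grant adminAll = new Grant("ADMIN", "All", "SQL"); // tagged, not anonymous
    System.out.println(adminAll.principal + ":" + adminAll.priv
        + " via " + adminAll.authorizer);
  }
}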
@@ -6242,14 +6242,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 }
 
 @Override
-public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef 
objToRefresh,
+public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef 
objToRefresh, String authorizer,
 GrantRevokePrivilegeRequest grantRequest)
 throws TException {
   incrementCounter("refresh_privileges");
   firePreEvent(new PreAuthorizationCallEvent(this));
   GrantRevokePrivilegeResponse response = new 
GrantRevokePrivilegeResponse();
   try {
-boolean result = getMS().refreshPrivileges(objToRefresh, 
grantRequest.getPrivileges());
+boolean result = getMS().refreshPrivileges(objToRefresh, authorizer, 
grantRequest.getPrivileges());
 response.setSuccess(result);
   } catch (MetaException e) {
 throw e;

http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 23cf7e4..2965e72 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2277,7 +2277,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
-  public boolean refresh_privileges(HiveObjectRef objToRefresh,
+  public boolean refresh_privileges(HiveObjectRef objToRefresh, String 
authorizer,
   PrivilegeBag grantPrivileges) throws MetaException,
   TException {
 String defaultCat = getDefaultCatalog(conf);
@@ -2294,7 +2294,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
 grantReq.setRequestType(GrantRevokeType.GRANT);
 grantReq.setPrivileges(grantPrivileges);
 
-GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, 
grantReq);
+GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, 
authorizer, grantReq);
 if (!res.isSetSuccess()) {
   throw new MetaException("GrantRevokePrivilegeResponse missing success 
field");
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 09f9bb1..7ba286a 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2567,12 +2567,13 @@ public interface IMetaStoreClient {
 
   /**
* @param revokePrivileges
+   * @param authorizer
* @param objToRefresh
* @return true on success
* @throws MetaException
* @throws TException
*/
-  boolean refresh_privileges(HiveObjectRef objToRefresh, PrivilegeBag 
grantPrivileges)
+  boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, 
PrivilegeBag grantPrivileges)
   throws MetaException, TException;
 
   /**
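Across these hunks refresh_privileges gains an authorizer name, threaded from the Thrift handler through HiveMetaStoreClient down to this interface, so grants synchronized from different authorizers can be replaced independently. A hedged sketch of the resulting contract; the map-backed store below is a stand-in, not ObjectStore's implementation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PrivilegeRefresh {
  // object name -> (authorizer name -> grants)
  private final Map<String, Map<String, List<String>>> privs = new HashMap<>();

  // Replace only the grants recorded under this authorizer for this object,
  // leaving grants written by other authorizers untouched.
  public boolean refreshPrivileges(String object, String authorizer,
      List<String> grants) {
    privs.computeIfAbsent(object, k -> new HashMap<>())
        .put(authorizer, new ArrayList<>(grants));
    return true;
  }

  public static void main(String[] args) {
    PrivilegeRefresh store = new PrivilegeRefresh();
    store.refreshPrivileges("default.t1", "StorageBasedAuthorizer",
        Arrays.asList("user1:SELECT"));
  }
}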

http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 

[2/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
--
diff --git 
a/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql 
b/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
new file mode 100644
index 0000000..33ccace
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
@@ -0,0 +1,1147 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY 
(SEQUENCE_NAME);
+
+INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES 
('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY 
(CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+"COLUMN_NAME" VARCHAR2(767) NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+GRANT_OPTION NUMBER (5) NOT NULL,
+GRANTOR VARCHAR2(128) NULL,
+GRANTOR_TYPE VARCHAR2(128) NULL,
+PART_ID NUMBER NULL,
+PRINCIPAL_NAME VARCHAR2(128) NULL,
+PRINCIPAL_TYPE VARCHAR2(128) NULL,
+PART_COL_PRIV VARCHAR2(128) NULL,
+AUTHORIZER VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY 
(PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+CD_ID NUMBER NOT NULL,
+"COMMENT" VARCHAR2(256) NULL,
+"COLUMN_NAME" VARCHAR2(767) NOT NULL,
+TYPE_NAME CLOB NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY 
(CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+PART_ID NUMBER NOT NULL,
+PART_KEY_VAL VARCHAR2(256) NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY 
KEY (PART_ID,INTEGER_IDX);
+
+CREATE TABLE CTLGS (
+CTLG_ID NUMBER PRIMARY KEY,
+"NAME" VARCHAR2(256),
+"DESC" VARCHAR2(4000),
+LOCATION_URI VARCHAR2(4000) NOT NULL,
+UNIQUE ("NAME")
+);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+DB_ID NUMBER NOT NULL,
+"DESC" VARCHAR2(4000) NULL,
+DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+OWNER_TYPE VARCHAR2(10) NULL,
+CTLG_NAME VARCHAR2(256)
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+PART_ID NUMBER NOT NULL,
+PARAM_KEY VARCHAR2(256) NOT NULL,
+PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY 
(PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+SERDE_ID NUMBER NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+SLIB VARCHAR2(4000) NULL,
+"DESCRIPTION" VARCHAR2(4000),
+"SERIALIZER_CLASS" VARCHAR2(4000),
+"DESERIALIZER_CLASS" VARCHAR2(4000),
+"SERDE_TYPE" NUMBER
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+TYPES_ID NUMBER NOT NULL,
+TYPE_NAME VARCHAR2(128) NULL,
+TYPE1 VARCHAR2(767) NULL,
+TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+TBL_ID NUMBER NOT NULL,
+PKEY_COMMENT VARCHAR2(4000) NULL,
+PKEY_NAME VARCHAR2(128) NOT NULL,
+PKEY_TYPE VARCHAR2(767) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE 

[6/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/ql/src/test/results/clientpositive/llap/resourceplan.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out 
b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index 8e58b16..c7e9638 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -300,6 +300,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `DB_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -315,7 +316,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"DB_PRIV\"
+  \"DB_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"DB_PRIVS\""
 )
@@ -332,6 +334,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `DB_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -347,7 +350,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"DB_PRIV\"
+  \"DB_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"DB_PRIVS\""
 )
@@ -363,6 +367,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `USER_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -377,7 +382,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"USER_PRIV\"
+  \"USER_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"GLOBAL_PRIVS\""
 )
@@ -393,6 +399,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `USER_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -407,7 +414,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"USER_PRIV\"
+  \"USER_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"GLOBAL_PRIVS\""
 )
@@ -605,6 +613,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` 
(
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_COL_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) 
DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -621,7 +630,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_COL_PRIV\"
+  \"PART_COL_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_COL_PRIVS\""
 )
@@ -639,6 +649,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS 
`PART_COL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_COL_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) 
DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -655,7 +666,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_COL_PRIV\"
+  \"PART_COL_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_COL_PRIVS\""
 )
@@ -672,6 +684,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -687,7 +700,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_PRIV\"
+  \"PART_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_PRIVS\""
 )
@@ -704,6 +718,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -719,7 +734,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_PRIV\"
+  \"PART_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_PRIVS\""
 )
@@ -1485,6 +1501,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS 
`TBL_COL_PRIVS` (
   `PRINCIPAL_TYPE` string,
   `TBL_COL_PRIV` string,
   `TBL_ID` bigint,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -1501,7 +1518,8 @@ TBLPROPERTIES (
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
   \"TBL_COL_PRIV\",
-  \"TBL_ID\"
+  \"TBL_ID\",
+  \"AUTHORIZER\"

[3/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql 
b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
new file mode 100644
index 0000000..1cca25a
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
@@ -0,0 +1,1190 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- --
+-- Server version  5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, 
FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin 
DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` 
(`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` 
(`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+CREATE TABLE `CTLGS` (
+`CTLG_ID` BIGINT PRIMARY KEY,
+`NAME` VARCHAR(256),
+`DESC` VARCHAR(4000),
+`LOCATION_URI` VARCHAR(4000) NOT NULL,
+UNIQUE KEY `UNIQUE_CATALOG` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT 
NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  `CTLG_NAME` varchar(256) NOT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
+  CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;

[1/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/branch-3 22c173d67 -> 80eafb4fa


http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
--
diff --git 
a/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql 
b/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
new file mode 100644
index 0000000..f6b641b
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
@@ -0,0 +1,1835 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+"SD_ID" bigint NOT NULL,
+"BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+"INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+"CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+"CD_ID" bigint NOT NULL,
+"COMMENT" character varying(4000),
+"COLUMN_NAME" character varying(767) NOT NULL,
+"TYPE_NAME" text,
+"INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+"DB_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(180) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+CREATE TABLE "CTLGS" (
+"CTLG_ID" BIGINT PRIMARY KEY,
+"NAME" VARCHAR(256) UNIQUE,
+"DESC" VARCHAR(4000),
+"LOCATION_URI" VARCHAR(4000) NOT NULL
+);
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+"DB_ID" bigint NOT NULL,
+"DESC" character varying(4000) DEFAULT NULL::character varying,
+"DB_LOCATION_URI" character varying(4000) NOT NULL,
+"NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+"CTLG_NAME" varchar(256)
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+"DB_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DB_ID" bigint,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+"USER_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+"INDEX_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DEFERRED_REBUILD" boolean NOT NULL,
+"INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character 
varying,
+"INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+"INDEX_TBL_ID" bigint,
+"LAST_ACCESS_TIME" bigint NOT NULL,
+"ORIG_TBL_ID" bigint,
+"SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+"INDEX_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(256) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+"CLASS_NAME" 

[7/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel 
Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/80eafb4f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/80eafb4f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/80eafb4f

Branch: refs/heads/branch-3
Commit: 80eafb4fa83b5f6c326f7213ded9075dce761687
Parents: 22c173d
Author: Daniel Dai 
Authored: Tue May 29 12:07:09 2018 -0700
Committer: Daniel Dai 
Committed: Tue May 29 12:07:09 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |2 -
 .../storagehandler/DummyHCatAuthProvider.java   |7 +
 .../listener/DummyRawStoreFailEvent.java|4 +-
 .../TestHDFSPermissionPolicyProvider.java   |  189 ++
 .../apache/hive/jdbc/TestRestrictedList.java|1 -
 .../TestInformationSchemaWithPrivilege.java |   22 +-
 ...DummyHiveMetastoreAuthorizationProvider.java |8 +-
 .../jdbc/dao/DatabaseAccessorFactory.java   |3 +-
 .../scripts/upgrade/derby/upgrade.order.derby   |1 +
 .../upgrade/hive/hive-schema-3.0.0.hive.sql |   41 +-
 .../scripts/upgrade/mssql/upgrade.order.mssql   |1 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |1 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |1 +
 .../upgrade/postgres/upgrade.order.postgres |1 +
 pom.xml |2 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |1 +
 .../ql/metadata/SessionHiveMetaStoreClient.java |2 +-
 .../HDFSPermissionPolicyProvider.java   |  120 ++
 .../HiveAuthorizationProviderBase.java  |6 +
 .../HiveMetastoreAuthorizationProvider.java |7 +
 .../authorization/PolicyProviderContainer.java  |   77 +
 .../authorization/PrivilegeSynchonizer.java |   70 +-
 .../StorageBasedAuthorizationProvider.java  |7 +
 .../authorization/plugin/HiveV1Authorizer.java  |   18 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java|2 +-
 .../generic/GenericUDFCurrentAuthorizer.java|  120 ++
 .../GenericUDFRestrictInformationSchema.java|   16 +-
 .../clientpositive/llap/resourceplan.q.out  |   78 +-
 .../results/clientpositive/show_functions.q.out |2 +
 .../apache/hive/service/server/HiveServer2.java |   37 +-
 standalone-metastore/pom.xml|2 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |   36 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|   29 +-
 .../ThriftHiveMetastore_server.skeleton.cpp |2 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |   20 +
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../hive/metastore/api/HiveObjectPrivilege.java |  112 +-
 .../hive/metastore/api/ThriftHiveMetastore.java |  142 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   |   35 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   23 +
 .../hive_metastore/ThriftHiveMetastore-remote   |8 +-
 .../hive_metastore/ThriftHiveMetastore.py   |   32 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   15 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   14 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|6 +-
 .../hive/metastore/HiveMetaStoreClient.java |4 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |3 +-
 .../hadoop/hive/metastore/ObjectStore.java  |  386 ++--
 .../apache/hadoop/hive/metastore/RawStore.java  |2 +-
 .../hive/metastore/cache/CachedStore.java   |4 +-
 .../builder/HiveObjectPrivilegeBuilder.java |8 +-
 .../hive/metastore/model/MDBPrivilege.java  |   12 +-
 .../hive/metastore/model/MGlobalPrivilege.java  |   12 +-
 .../model/MPartitionColumnPrivilege.java|   12 +-
 .../metastore/model/MPartitionPrivilege.java|   12 +-
 .../metastore/model/MTableColumnPrivilege.java  |   12 +-
 .../hive/metastore/model/MTablePrivilege.java   |   12 +-
 .../src/main/resources/package.jdo  |   24 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |  692 +++
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   28 +
 .../src/main/sql/derby/upgrade.order.derby  |1 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  | 1252 
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   30 +
 .../src/main/sql/mssql/upgrade.order.mssql  |1 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  | 1190 
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   30 +
 .../src/main/sql/mysql/upgrade.order.mysql  |1 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql | 1147 +++
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql|   31 +
 .../src/main/sql/oracle/upgrade.order.oracle|1 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql | 1835 ++
 .../upgrade-3.0.0-to-3.1.0.postgres.sql |   33 +
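Taken together, the file list shows the moving parts: a policy provider (e.g. the new HDFSPermissionPolicyProvider) exposes filesystem permissions, and PrivilegeSynchonizer periodically writes them into the metastore privilege tables, tagged with the AUTHORIZER column added by the schema scripts so each authorizer's grants stay separate. A simplified, assumption-laden sketch of that sync loop; the interfaces and names below are illustrative, not the listed classes' real APIs:

import java.util.List;
import java.util.Map;

public class PrivilegeSynchronizerSketch implements Runnable {
  interface PolicyProvider {
    // object name -> principals the underlying store (e.g. HDFS) lets read it
    Map<String, List<String>> listObjectGrants();
  }

  interface MetastoreClient {
    void refreshPrivileges(String object, String authorizer, List<String> grants);
  }

  private final PolicyProvider provider;
  private final MetastoreClient client;
  private final String authorizerName;
  private final long intervalMs;

  PrivilegeSynchronizerSketch(PolicyProvider provider, MetastoreClient client,
      String authorizerName, long intervalMs) {
    this.provider = provider;
    this.client = client;
    this.authorizerName = authorizerName;
    this.intervalMs = intervalMs;
  }

  @Override
  public void run() {
    try {
      while (!Thread.currentThread().isInterrupted()) {
        // Mirror the provider's view into the metastore, tagged with this
        // authorizer's name so grants from other authorizers survive.
        for (Map.Entry<String, List<String>> e
            : provider.listObjectGrants().entrySet()) {
          client.refreshPrivileges(e.getKey(), authorizerName, e.getValue());
        }
        Thread.sleep(intervalMs);
      }
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt(); // exit the sync loop cleanly
    }
  }
}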
 

[4/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/80eafb4f/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
--
diff --git 
a/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql 
b/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
new file mode 100644
index 0000000..d679658
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
@@ -0,0 +1,692 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: 
/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: 
jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- --
+-- DDL Statements for functions
+-- --
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE 
JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN 
VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL 
DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- --
+-- DDL Statements for tables
+-- --
+CREATE TABLE "APP"."DBS" (
+  "DB_ID" BIGINT NOT NULL,
+  "DESC" VARCHAR(4000),
+  "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+  "NAME" VARCHAR(128),
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  "CTLG_NAME" VARCHAR(256) NOT NULL
+);
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), 
"GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" 
VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" 
VARCHAR(128));
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, 
"COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" 
SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), 
"PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" 
VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" 
VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, 
"INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" 
VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, 
"PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" 
VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), 
"PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" 
VARCHAR(128));
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER 
NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" 
VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, 
"LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" 
VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), 
"SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" 
VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, 
"CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" 
VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" 
VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), 
"AUTHORIZER" VARCHAR(128));
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" 
INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), 
"GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" 
VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE 

[6/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/ql/src/test/results/clientpositive/llap/resourceplan.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out 
b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index 8e58b16..c7e9638 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -300,6 +300,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `DB_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -315,7 +316,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"DB_PRIV\"
+  \"DB_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"DB_PRIVS\""
 )
@@ -332,6 +334,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `DB_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -347,7 +350,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"DB_PRIV\"
+  \"DB_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"DB_PRIVS\""
 )
@@ -363,6 +367,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `USER_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -377,7 +382,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"USER_PRIV\"
+  \"USER_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"GLOBAL_PRIVS\""
 )
@@ -393,6 +399,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `USER_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -407,7 +414,8 @@ TBLPROPERTIES (
   \"GRANTOR_TYPE\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"USER_PRIV\"
+  \"USER_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"GLOBAL_PRIVS\""
 )
@@ -605,6 +613,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` 
(
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_COL_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) 
DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -621,7 +630,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_COL_PRIV\"
+  \"PART_COL_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_COL_PRIVS\""
 )
@@ -639,6 +649,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS 
`PART_COL_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_COL_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) 
DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -655,7 +666,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_COL_PRIV\"
+  \"PART_COL_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_COL_PRIVS\""
 )
@@ -672,6 +684,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -687,7 +700,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_PRIV\"
+  \"PART_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_PRIVS\""
 )
@@ -704,6 +718,7 @@ POSTHOOK: query: CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
   `PRINCIPAL_NAME` string,
   `PRINCIPAL_TYPE` string,
   `PART_PRIV` string,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -719,7 +734,8 @@ TBLPROPERTIES (
   \"PART_ID\",
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
-  \"PART_PRIV\"
+  \"PART_PRIV\",
+  \"AUTHORIZER\"
 FROM
   \"PART_PRIVS\""
 )
@@ -1485,6 +1501,7 @@ PREHOOK: query: CREATE TABLE IF NOT EXISTS 
`TBL_COL_PRIVS` (
   `PRINCIPAL_TYPE` string,
   `TBL_COL_PRIV` string,
   `TBL_ID` bigint,
+  `AUTHORIZER` string,
   CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -1501,7 +1518,8 @@ TBLPROPERTIES (
   \"PRINCIPAL_NAME\",
   \"PRINCIPAL_TYPE\",
   \"TBL_COL_PRIV\",
-  \"TBL_ID\"
+  \"TBL_ID\",
+  \"AUTHORIZER\"

[4/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
--
diff --git 
a/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql 
b/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
new file mode 100644
index 0000000..d679658
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.1.0.derby.sql
@@ -0,0 +1,692 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: 
/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: 
jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- --
+-- DDL Statements for functions
+-- --
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE 
JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN 
VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL 
DATA CALLED ON NULL INPUT EXTERNAL NAME 
'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- --
+-- DDL Statements for tables
+-- --
+CREATE TABLE "APP"."DBS" (
+  "DB_ID" BIGINT NOT NULL,
+  "DESC" VARCHAR(4000),
+  "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+  "NAME" VARCHAR(128),
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  "CTLG_NAME" VARCHAR(256) NOT NULL
+);
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), 
"GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" 
VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" 
VARCHAR(128));
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, 
"COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" 
SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), 
"PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" 
VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" 
VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, 
"INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" 
VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, 
"PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" 
VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), 
"PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" 
VARCHAR(128));
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER 
NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" 
VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, 
"LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" 
VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" 
INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" 
VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), 
"SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" 
VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, 
"CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" 
VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" 
VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), 
"AUTHORIZER" VARCHAR(128));
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" 
INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), 
"GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" 
VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE 

[1/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 2811d0af7 -> 83afdb4d5


http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
--
diff --git 
a/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql 
b/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
new file mode 100644
index 0000000..f6b641b
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
@@ -0,0 +1,1835 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+"SD_ID" bigint NOT NULL,
+"BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+"INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+"CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+"CD_ID" bigint NOT NULL,
+"COMMENT" character varying(4000),
+"COLUMN_NAME" character varying(767) NOT NULL,
+"TYPE_NAME" text,
+"INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+"DB_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(180) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+CREATE TABLE "CTLGS" (
+"CTLG_ID" BIGINT PRIMARY KEY,
+"NAME" VARCHAR(256) UNIQUE,
+"DESC" VARCHAR(4000),
+"LOCATION_URI" VARCHAR(4000) NOT NULL
+);
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+"DB_ID" bigint NOT NULL,
+"DESC" character varying(4000) DEFAULT NULL::character varying,
+"DB_LOCATION_URI" character varying(4000) NOT NULL,
+"NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+"OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+"CTLG_NAME" varchar(256)
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+"DB_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DB_ID" bigint,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+"USER_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+"AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+"INDEX_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DEFERRED_REBUILD" boolean NOT NULL,
+"INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character 
varying,
+"INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+"INDEX_TBL_ID" bigint,
+"LAST_ACCESS_TIME" bigint NOT NULL,
+"ORIG_TBL_ID" bigint,
+"SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+"INDEX_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(256) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+"CLASS_NAME" 

[2/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
--
diff --git 
a/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql 
b/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
new file mode 100644
index 0000000..33ccace
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
@@ -0,0 +1,1147 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY 
(SEQUENCE_NAME);
+
+INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES 
('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY 
(CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+"COLUMN_NAME" VARCHAR2(767) NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+GRANT_OPTION NUMBER (5) NOT NULL,
+GRANTOR VARCHAR2(128) NULL,
+GRANTOR_TYPE VARCHAR2(128) NULL,
+PART_ID NUMBER NULL,
+PRINCIPAL_NAME VARCHAR2(128) NULL,
+PRINCIPAL_TYPE VARCHAR2(128) NULL,
+PART_COL_PRIV VARCHAR2(128) NULL,
+AUTHORIZER VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY 
(PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+CD_ID NUMBER NOT NULL,
+"COMMENT" VARCHAR2(256) NULL,
+"COLUMN_NAME" VARCHAR2(767) NOT NULL,
+TYPE_NAME CLOB NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY 
(CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+PART_ID NUMBER NOT NULL,
+PART_KEY_VAL VARCHAR2(256) NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY 
KEY (PART_ID,INTEGER_IDX);
+
+CREATE TABLE CTLGS (
+CTLG_ID NUMBER PRIMARY KEY,
+"NAME" VARCHAR2(256),
+"DESC" VARCHAR2(4000),
+LOCATION_URI VARCHAR2(4000) NOT NULL,
+UNIQUE ("NAME")
+);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+DB_ID NUMBER NOT NULL,
+"DESC" VARCHAR2(4000) NULL,
+DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+OWNER_TYPE VARCHAR2(10) NULL,
+CTLG_NAME VARCHAR2(256)
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+PART_ID NUMBER NOT NULL,
+PARAM_KEY VARCHAR2(256) NOT NULL,
+PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY 
(PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+SERDE_ID NUMBER NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+SLIB VARCHAR2(4000) NULL,
+"DESCRIPTION" VARCHAR2(4000),
+"SERIALIZER_CLASS" VARCHAR2(4000),
+"DESERIALIZER_CLASS" VARCHAR2(4000),
+"SERDE_TYPE" NUMBER
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+TYPES_ID NUMBER NOT NULL,
+TYPE_NAME VARCHAR2(128) NULL,
+TYPE1 VARCHAR2(767) NULL,
+TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+TBL_ID NUMBER NOT NULL,
+PKEY_COMMENT VARCHAR2(4000) NULL,
+PKEY_NAME VARCHAR2(128) NOT NULL,
+PKEY_TYPE VARCHAR2(767) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE 

[3/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
--
diff --git 
a/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql 
b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
new file mode 100644
index 0000000..1cca25a
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
@@ -0,0 +1,1190 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- --
+-- Server version  5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, 
FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin 
DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` 
(`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` 
(`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+CREATE TABLE `CTLGS` (
+`CTLG_ID` BIGINT PRIMARY KEY,
+`NAME` VARCHAR(256),
+`DESC` VARCHAR(4000),
+`LOCATION_URI` VARCHAR(4000) NOT NULL,
+UNIQUE KEY `UNIQUE_CATALOG` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT 
NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  `CTLG_NAME` varchar(256) NOT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
+  CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET 
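
In the MySQL schema above, every metastore database row is pinned to a catalog through CTLG_FK1 (DBS.CTLG_NAME references CTLGS.NAME). A minimal sketch of listing databases per catalog straight against the backing RDBMS; the connection string and credentials are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class CatalogDatabaseList {
  public static void main(String[] args) throws Exception {
    // Hypothetical metastore MySQL endpoint.
    try (Connection conn = DriverManager.getConnection(
             "jdbc:mysql://localhost:3306/metastore", "hiveuser", "secret");
         Statement stmt = conn.createStatement();
         // Join along the new CTLG_FK1 relationship.
         ResultSet rs = stmt.executeQuery(
             "SELECT c.NAME AS ctlg, d.NAME AS db "
           + "FROM CTLGS c JOIN DBS d ON d.CTLG_NAME = c.NAME")) {
      while (rs.next()) {
        System.out.println(rs.getString("ctlg") + " -> " + rs.getString("db"));
      }
    }
  }
}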

[5/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index c1d25db..d8b8414 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -814,7 +814,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   PrivilegeBag privs = new PrivilegeBag();
   privs.addToPrivileges(new HiveObjectPrivilege( new 
HiveObjectRef(HiveObjectType.GLOBAL, null,
 null, null, null), ADMIN, PrincipalType.ROLE, new 
PrivilegeGrantInfo("All", 0, ADMIN,
-PrincipalType.ROLE, true)));
+  PrincipalType.ROLE, true), "SQL"));
   try {
 ms.grantPrivileges(privs);
   } catch (InvalidObjectException e) {
@@ -6226,14 +6226,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 }
 
 @Override
-public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef 
objToRefresh,
+public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef 
objToRefresh, String authorizer,
 GrantRevokePrivilegeRequest grantRequest)
 throws TException {
   incrementCounter("refresh_privileges");
   firePreEvent(new PreAuthorizationCallEvent(this));
   GrantRevokePrivilegeResponse response = new 
GrantRevokePrivilegeResponse();
   try {
-boolean result = getMS().refreshPrivileges(objToRefresh, 
grantRequest.getPrivileges());
+boolean result = getMS().refreshPrivileges(objToRefresh, authorizer, 
grantRequest.getPrivileges());
 response.setSuccess(result);
   } catch (MetaException e) {
 throw e;

http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 6af2aa5..fd7546e 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2288,7 +2288,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
-  public boolean refresh_privileges(HiveObjectRef objToRefresh,
+  public boolean refresh_privileges(HiveObjectRef objToRefresh, String 
authorizer,
   PrivilegeBag grantPrivileges) throws MetaException,
   TException {
 String defaultCat = getDefaultCatalog(conf);
@@ -2305,7 +2305,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
 grantReq.setRequestType(GrantRevokeType.GRANT);
 grantReq.setPrivileges(grantPrivileges);
 
-GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, 
grantReq);
+GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, 
authorizer, grantReq);
 if (!res.isSetSuccess()) {
   throw new MetaException("GrantRevokePrivilegeResponse missing success 
field");
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
--
diff --git 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 09f9bb1..7ba286a 100644
--- 
a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2567,12 +2567,13 @@ public interface IMetaStoreClient {
 
   /**
* @param revokePrivileges
+   * @param authorizer
* @param objToRefresh
* @return true on success
* @throws MetaException
* @throws TException
*/
-  boolean refresh_privileges(HiveObjectRef objToRefresh, PrivilegeBag 
grantPrivileges)
+  boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer, 
PrivilegeBag grantPrivileges)
   throws MetaException, TException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/83afdb4d/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 
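
The hunks above thread a String authorizer parameter through refresh_privileges on the server, the client, and the interface. A minimal sketch of the new call shape, assuming a reachable metastore; the client construction and the empty PrivilegeBag are illustrative, and "SQL" mirrors the authorizer name used in HiveMetaStore.java above:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;

public class RefreshPrivilegesSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    // Object whose privileges get refreshed; in practice the privilege
    // synchronizer fills the bag from the configured authorizer.
    HiveObjectRef ref = new HiveObjectRef(HiveObjectType.TABLE, "default", "t", null, null);
    PrivilegeBag privs = new PrivilegeBag();
    // New in this patch: the authorizer name travels with the request.
    boolean ok = client.refresh_privileges(ref, "SQL", privs);
    System.out.println("refreshed: " + ok);
    client.close();
  }
}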

[7/7] hive git commit: HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel Dai, reviewed by Thejas Nair)

2018-05-29 Thread daijy
HIVE-19440: Make StorageBasedAuthorizer work with information schema (Daniel 
Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/83afdb4d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/83afdb4d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/83afdb4d

Branch: refs/heads/master
Commit: 83afdb4d52d8ee9c6ac4006a1808233609c85298
Parents: 2811d0a
Author: Daniel Dai 
Authored: Tue May 29 12:05:29 2018 -0700
Committer: Daniel Dai 
Committed: Tue May 29 12:05:29 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |2 -
 .../storagehandler/DummyHCatAuthProvider.java   |7 +
 .../listener/DummyRawStoreFailEvent.java|4 +-
 .../TestHDFSPermissionPolicyProvider.java   |  189 ++
 .../apache/hive/jdbc/TestRestrictedList.java|1 -
 .../TestInformationSchemaWithPrivilege.java |   22 +-
 ...DummyHiveMetastoreAuthorizationProvider.java |8 +-
 .../jdbc/dao/DatabaseAccessorFactory.java   |3 +-
 .../scripts/upgrade/derby/upgrade.order.derby   |1 +
 .../upgrade/hive/hive-schema-3.0.0.hive.sql |   41 +-
 .../scripts/upgrade/mssql/upgrade.order.mssql   |1 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |1 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |1 +
 .../upgrade/postgres/upgrade.order.postgres |1 +
 pom.xml |2 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |1 +
 .../ql/metadata/SessionHiveMetaStoreClient.java |2 +-
 .../HDFSPermissionPolicyProvider.java   |  120 ++
 .../HiveAuthorizationProviderBase.java  |6 +
 .../HiveMetastoreAuthorizationProvider.java |7 +
 .../authorization/PolicyProviderContainer.java  |   77 +
 .../authorization/PrivilegeSynchonizer.java |   70 +-
 .../StorageBasedAuthorizationProvider.java  |7 +
 .../authorization/plugin/HiveV1Authorizer.java  |   18 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java|2 +-
 .../generic/GenericUDFCurrentAuthorizer.java|  120 ++
 .../GenericUDFRestrictInformationSchema.java|   16 +-
 .../clientpositive/llap/resourceplan.q.out  |   78 +-
 .../results/clientpositive/show_functions.q.out |2 +
 .../apache/hive/service/server/HiveServer2.java |   37 +-
 standalone-metastore/pom.xml|2 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |   36 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|   29 +-
 .../ThriftHiveMetastore_server.skeleton.cpp |2 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |   20 +
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../hive/metastore/api/HiveObjectPrivilege.java |  112 +-
 .../hive/metastore/api/ThriftHiveMetastore.java |  142 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   |   35 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   23 +
 .../hive_metastore/ThriftHiveMetastore-remote   |8 +-
 .../hive_metastore/ThriftHiveMetastore.py   |   32 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   15 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   14 +-
 .../hadoop/hive/metastore/HiveMetaStore.java|6 +-
 .../hive/metastore/HiveMetaStoreClient.java |4 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |3 +-
 .../hadoop/hive/metastore/ObjectStore.java  |  386 ++--
 .../apache/hadoop/hive/metastore/RawStore.java  |2 +-
 .../hive/metastore/cache/CachedStore.java   |4 +-
 .../builder/HiveObjectPrivilegeBuilder.java |8 +-
 .../hive/metastore/model/MDBPrivilege.java  |   12 +-
 .../hive/metastore/model/MGlobalPrivilege.java  |   12 +-
 .../model/MPartitionColumnPrivilege.java|   12 +-
 .../metastore/model/MPartitionPrivilege.java|   12 +-
 .../metastore/model/MTableColumnPrivilege.java  |   12 +-
 .../hive/metastore/model/MTablePrivilege.java   |   12 +-
 .../src/main/resources/package.jdo  |   24 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |  692 +++
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   28 +
 .../src/main/sql/derby/upgrade.order.derby  |1 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  | 1252 
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   30 +
 .../src/main/sql/mssql/upgrade.order.mssql  |1 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  | 1190 
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   30 +
 .../src/main/sql/mysql/upgrade.order.mysql  |1 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql | 1147 +++
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql|   31 +
 .../src/main/sql/oracle/upgrade.order.oracle|1 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql | 1835 ++
 .../upgrade-3.0.0-to-3.1.0.postgres.sql |   33 +
 

hive git commit: HIVE-19686 schematool --createCatalog option fails when using Oracle as the RDBMS (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3.0 6112d57e3 -> 2073cdebe


HIVE-19686 schematool --createCatalog option fails when using Oracle as the 
RDBMS (Alan Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2073cdeb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2073cdeb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2073cdeb

Branch: refs/heads/branch-3.0
Commit: 2073cdebefc458fdae4b06da5e544aabdb964672
Parents: 6112d57
Author: Alan Gates 
Authored: Tue May 29 11:36:33 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 11:46:57 2018 -0700

--
 .../java/org/apache/hive/beeline/HiveSchemaTool.java   | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2073cdeb/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index a469cd4..856b0ac 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -34,7 +34,6 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
-import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -909,12 +907,7 @@ public class HiveSchemaTool {
 return;
   }
 }
-SQLGenerator sqlGenerator = new SQLGenerator(
-DatabaseProduct.determineDatabaseProduct(
-conn.getMetaData().getDatabaseProductName()
-), hiveConf);
-String query = sqlGenerator.addForUpdateClause("select max(" + 
quoteIf("CTLG_ID") + ") " +
-"from " + quoteIf("CTLGS"));
+String query = "select max(" + quoteIf("CTLG_ID") + ") from " + 
quoteIf("CTLGS");
 LOG.debug("Going to run " + query);
 ResultSet rs = stmt.executeQuery(query);
 if (!rs.next()) {
@@ -930,14 +923,14 @@ public class HiveSchemaTool {
 conn.commit();
 success = true;
   }
-} catch (MetaException|SQLException e) {
+} catch (SQLException e) {
   throw new HiveMetaException("Failed to add catalog", e);
 } finally {
   try {
 if (!success) conn.rollback();
   } catch (SQLException e) {
 // Not really much we can do here.
-LOG.error("Failed to rollback, everything will probably go bad from 
here.");
+LOG.error("Failed to rollback, everything will probably go bad from 
here.", e);
   }
 }
   }
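
The fix above drops the SQLGenerator/addForUpdateClause wrapping and issues the plain aggregate, which Oracle accepts. A minimal sketch of the replacement query run over JDBC; the connection string and the unquoted identifiers are assumptions (HiveSchemaTool quotes them per dialect via quoteIf):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class NextCatalogId {
  public static void main(String[] args) throws Exception {
    // Hypothetical Oracle metastore endpoint.
    try (Connection conn = DriverManager.getConnection(
             "jdbc:oracle:thin:@//localhost:1521/XE", "hiveuser", "secret");
         Statement stmt = conn.createStatement();
         // Same statement HiveSchemaTool now runs, minus the FOR UPDATE suffix.
         ResultSet rs = stmt.executeQuery("SELECT MAX(CTLG_ID) FROM CTLGS")) {
      long maxId = 0;
      if (rs.next()) {
        maxId = rs.getLong(1);  // NULL maps to 0 when CTLGS is empty
      }
      System.out.println("next catalog id: " + (maxId + 1));
    }
  }
}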



[1/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-29 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 5d3dfc2af -> 22c173d67


http://git-wip-us.apache.org/repos/asf/hive/blob/22c173d6/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index d37a27e..c5d0214 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -457,7 +457,7 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: decimal_vgby_small
-  Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
   TableScan Vectorization:
   native: true
   vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 
3:cint:int, 4:ROW__ID:struct]
@@ -468,7 +468,7 @@ STAGE PLANS:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [1, 2, 3]
-Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(cdecimal1), max(cdecimal1), 
min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), 
min(cdecimal2), sum(cdecimal2), count()
   Group By Vectorization:
@@ -482,7 +482,7 @@ STAGE PLANS:
   keys: cint (type: int)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9
-  Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: int)
 sort order: +
@@ -493,7 +493,7 @@ STAGE PLANS:
 native: true
 nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS 
true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
 valueColumnNums: [1, 2, 3, 4, 5, 6, 7, 8, 9]
-Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
 value expressions: _col1 (type: bigint), _col2 (type: 
decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 
(type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 
(type: decimal(26,0)), _col9 (type: bigint)
 Execution mode: vectorized
 Map Vectorization:
@@ -540,14 +540,14 @@ STAGE PLANS:
 keys: KEY._col0 (type: int)
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9
-Statistics: Num rows: 6144 Data size: 173221 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 6144 Data size: 173216 Basic stats: 
COMPLETE Column stats: NONE
 Filter Operator
   Filter Vectorization:
   className: VectorFilterOperator
   native: true
   predicateExpression: FilterLongColGreaterLongScalar(col 
9:bigint, val 1)
   predicate: (_col9 > 1L) (type: boolean)
-  Statistics: Num rows: 2048 Data size: 57740 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 2048 Data size: 57738 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), 
_col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: 
decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: 
decimal(16,0)), _col8 (type: decimal(26,0))
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
@@ -555,13 +555,13 @@ STAGE PLANS:
 className: VectorSelectOperator
   

[2/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-29 Thread mmccline
HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt 
McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/22c173d6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/22c173d6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/22c173d6

Branch: refs/heads/branch-3
Commit: 22c173d67c18ad22a28995d7ab6956ab4ea90fbc
Parents: 5d3dfc2
Author: Matt McCline 
Authored: Tue May 29 13:47:20 2018 -0500
Committer: Matt McCline 
Committed: Tue May 29 13:48:03 2018 -0500

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt  |   2 +-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |   2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |   2 +-
 .../exec/vector/VectorExpressionDescriptor.java |  72 +--
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   2 +-
 .../ql/exec/vector/VectorizationContext.java|  26 +-
 .../ql/exec/vector/VectorizedBatchUtil.java |   4 +
 .../vector/expressions/CastDateToBoolean.java   |  61 +++
 .../expressions/CastDecimalToDecimal.java   |   2 +-
 .../vector/expressions/CastDoubleToDecimal.java |  15 +-
 .../vector/expressions/CastFloatToDecimal.java  |  65 +++
 .../vector/expressions/CastLongToDecimal.java   |   2 +-
 .../vector/expressions/CastStringToDecimal.java |   2 +-
 .../vector/expressions/CastTimestampToLong.java |  60 ++-
 .../expressions/NullVectorExpression.java   |  56 +++
 .../aggregates/VectorUDAFSumDecimal.java|   2 +-
 .../VectorUDAFSumDecimal64ToDecimal.java|   2 +-
 .../VectorPTFEvaluatorDecimalFirstValue.java|   2 +-
 .../exec/vector/ptf/VectorPTFGroupBatches.java  |   2 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |   5 +-
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |   4 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java|   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |   7 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |  68 ++-
 .../expressions/TestVectorCastStatement.java| 502 +++
 .../vector/expressions/TestVectorTypeCasts.java |   4 +
 .../llap/vector_decimal_aggregate.q.out |   4 +-
 .../clientpositive/spark/timestamp_1.q.out  |  24 +-
 .../clientpositive/spark/timestamp_2.q.out  |  24 +-
 .../clientpositive/spark/timestamp_3.q.out  |   4 +-
 .../spark/vector_decimal_aggregate.q.out|  36 +-
 .../results/clientpositive/timestamp_1.q.out|  24 +-
 .../results/clientpositive/timestamp_2.q.out|  24 +-
 .../results/clientpositive/timestamp_3.q.out|   4 +-
 .../vector_decimal_aggregate.q.out  |  32 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |   2 -
 38 files changed, 970 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/22c173d6/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 66c2831..fb81ca9 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3540,6 +3540,10 @@ public class HiveConf extends Configuration {
 "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were 
chosen for good performance\n" +
 "2. all: use VectorUDFAdaptor for all UDFs"
 ),
+HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", 
false,
+"internal use only, used to force always using the 
VectorUDFAdaptor.\n" +
+"The default is false, of course",
+true),
 HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", 
true,
 "This flag should be set to true to enable vectorized mode of the PTF 
of query execution.\n" +
 "The default value is true."),

http://git-wip-us.apache.org/repos/asf/hive/blob/22c173d6/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
--
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt 
b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
index fa72171..f512639 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
@@ -522,7 +522,7 @@ public class  extends VectorAggregateExpression {
 fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
 ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] 
= myagg.count;
 fields[AVERAGE_SUM_FIELD_INDEX].isNull[batchIndex] = false;
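
Among the changes above, HiveConf gains hive.test.vectorized.adaptor.override to force the VectorUDFAdaptor path in tests. A minimal sketch of flipping it programmatically; the surrounding test scaffolding is an assumption:

import org.apache.hadoop.hive.conf.HiveConf;

public class AdaptorOverrideSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Internal-use flag added by HIVE-19498; defaults to false.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
    System.out.println(
        conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE));
  }
}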

hive git commit: HIVE-19686 schematool --createCatalog option fails when using Oracle as the RDBMS (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3 c7f713d57 -> 5d3dfc2af


HIVE-19686 schematool --createCatalog option fails when using Oracle as the 
RDBMS (Alan Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5d3dfc2a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5d3dfc2a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5d3dfc2a

Branch: refs/heads/branch-3
Commit: 5d3dfc2afc21c7e01be17fef027e23e120c1645a
Parents: c7f713d
Author: Alan Gates 
Authored: Tue May 29 11:36:33 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 11:38:07 2018 -0700

--
 .../java/org/apache/hive/beeline/HiveSchemaTool.java   | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5d3dfc2a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index a469cd4..856b0ac 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -34,7 +34,6 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
-import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -909,12 +907,7 @@ public class HiveSchemaTool {
 return;
   }
 }
-SQLGenerator sqlGenerator = new SQLGenerator(
-DatabaseProduct.determineDatabaseProduct(
-conn.getMetaData().getDatabaseProductName()
-), hiveConf);
-String query = sqlGenerator.addForUpdateClause("select max(" + 
quoteIf("CTLG_ID") + ") " +
-"from " + quoteIf("CTLGS"));
+String query = "select max(" + quoteIf("CTLG_ID") + ") from " + 
quoteIf("CTLGS");
 LOG.debug("Going to run " + query);
 ResultSet rs = stmt.executeQuery(query);
 if (!rs.next()) {
@@ -930,14 +923,14 @@ public class HiveSchemaTool {
 conn.commit();
 success = true;
   }
-} catch (MetaException|SQLException e) {
+} catch (SQLException e) {
   throw new HiveMetaException("Failed to add catalog", e);
 } finally {
   try {
 if (!success) conn.rollback();
   } catch (SQLException e) {
 // Not really much we can do here.
-LOG.error("Failed to rollback, everything will probably go bad from 
here.");
+LOG.error("Failed to rollback, everything will probably go bad from 
here.", e);
   }
 }
   }



hive git commit: HIVE-19686 schematool --createCatalog option fails when using Oracle as the RDBMS (Alan Gates, reviewed by Daniel Dai)

2018-05-29 Thread gates
Repository: hive
Updated Branches:
  refs/heads/master 5ced7bf92 -> 2811d0af7


HIVE-19686 schematool --createCatalog option fails when using Oracle as the 
RDBMS (Alan Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2811d0af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2811d0af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2811d0af

Branch: refs/heads/master
Commit: 2811d0af76e5df62fd151edb4ac5e6ccebeb44cd
Parents: 5ced7bf
Author: Alan Gates 
Authored: Tue May 29 11:36:33 2018 -0700
Committer: Alan Gates 
Committed: Tue May 29 11:36:33 2018 -0700

--
 .../java/org/apache/hive/beeline/HiveSchemaTool.java   | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2811d0af/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 7aad265..4245fa3 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -34,7 +34,6 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
 import 
org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
-import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -916,12 +914,7 @@ public class HiveSchemaTool {
 return;
   }
 }
-SQLGenerator sqlGenerator = new SQLGenerator(
-DatabaseProduct.determineDatabaseProduct(
-conn.getMetaData().getDatabaseProductName()
-), hiveConf);
-String query = sqlGenerator.addForUpdateClause("select max(" + 
quoteIf("CTLG_ID") + ") " +
-"from " + quoteIf("CTLGS"));
+String query = "select max(" + quoteIf("CTLG_ID") + ") from " + 
quoteIf("CTLGS");
 LOG.debug("Going to run " + query);
 ResultSet rs = stmt.executeQuery(query);
 if (!rs.next()) {
@@ -937,14 +930,14 @@ public class HiveSchemaTool {
 conn.commit();
 success = true;
   }
-} catch (MetaException|SQLException e) {
+} catch (SQLException e) {
   throw new HiveMetaException("Failed to add catalog", e);
 } finally {
   try {
 if (!success) conn.rollback();
   } catch (SQLException e) {
 // Not really much we can do here.
-LOG.error("Failed to rollback, everything will probably go bad from 
here.");
+LOG.error("Failed to rollback, everything will probably go bad from 
here.", e);
   }
 }
   }



hive git commit: HIVE-19577: CREATE TEMPORARY TABLE LIKE and INSERT generate output format mismatch errors (Steve Yeom, reviewed by Jason Dere)

2018-05-29 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master 068d007b8 -> 5ced7bf92


HIVE-19577: CREATE TEMPORARY TABLE LIKE and INSERT generate output format 
mismatch errors (Steve Yeom, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5ced7bf9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5ced7bf9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5ced7bf9

Branch: refs/heads/master
Commit: 5ced7bf923c4ec4477ebfd68ff50aa2047be4eae
Parents: 068d007
Author: Jason Dere 
Authored: Tue May 29 10:58:24 2018 -0700
Committer: Jason Dere 
Committed: Tue May 29 10:58:24 2018 -0700

--
 data/files/students.txt | 199 +++
 .../test/resources/testconfiguration.properties |   3 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  12 +-
 ql/src/test/queries/clientpositive/cttl.q   |  29 +++
 .../test/results/clientpositive/llap/cttl.q.out |  78 
 5 files changed, 315 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5ced7bf9/data/files/students.txt
--
diff --git a/data/files/students.txt b/data/files/students.txt
new file mode 100644
index 0000000..f2d8a2d
--- /dev/null
+++ b/data/files/students.txt
@@ -0,0 +1,199 @@
+ulysses thompson   64  1.90
+katie carson   25  3.65
+luke king  65  0.73
+holly davidson 57  2.43
+fred miller55  3.77
+holly white43  0.24
+luke steinbeck 51  1.14
+nick underhill 31  2.46
+holly davidson 59  1.26
+calvin brown   56  0.72
+rachel robinson62  2.25
+tom carson 35  0.56
+tom johnson72  0.99
+irene garcia   54  1.06
+oscar nixon39  3.60
+holly allen32  2.58
+oscar hernandez19  0.05
+alice ichabod  65  2.25
+wendy thompson 30  2.39
+priscilla hernandez73  0.23
+gabriella van buren68  1.32
+yuri thompson  42  3.65
+yuri laertes   60  1.16
+sarah young23  2.76
+zach white 32  0.20
+nick van buren 68  1.75
+xavier underhill   41  1.51
+bob ichabod56  2.81
+zach steinbeck 61  2.22
+alice garcia   42  2.03
+jessica king   29  3.61
+calvin nixon   37  0.30
+fred polk  66  3.69
+bob zipper 40  0.28
+alice young75  0.31
+nick underhill 37  1.65
+mike white 57  0.69
+calvin ovid41  3.02
+fred steinbeck 47  3.57
+sarah ovid 65  0.00
+wendy nixon63  0.62
+gabriella zipper   77  1.51
+david king 40  1.99
+jessica white  30  3.82
+alice robinson 37  3.69
+zach nixon 74  2.75
+irene davidson 27  1.22
+priscilla xylophone43  1.60
+oscar zipper   25  2.43
+fred falkner   38  2.23
+ulysses polk   58  0.01
+katie hernandez47  3.80
+zach steinbeck 55  0.68
+fred laertes   69  3.62
+quinn laertes  70  3.66
+nick garcia50  0.12
+oscar young55  2.22
+bob underhill  47  0.24
+calvin young   77  1.60
+mike allen 65  2.95
+david young77  0.26
+oscar garcia   69  1.59
+ulysses ichabod26  0.95
+wendy laertes  76  1.13
+sarah laertes  20  0.24
+zach ichabod   60  1.60
+tom robinson   62  0.78
+zach steinbeck 69  1.01
+quinn garcia   57  0.98
+yuri van buren 32  1.97
+luke carson39  0.76
+calvin ovid73  0.82
+luke ellison   27  0.56
+oscar zipper   50  1.31
+fred steinbeck 52  3.14
+katie xylophone76  1.38
+luke king  54  2.30
+ethan white72  1.43
+yuri ovid  37  3.64
+jessica garcia 54  1.08
+luke young 29  0.80
+mike miller39  3.35
+fred hernandez 63  0.17
+priscilla hernandez52  0.35
+ethan garcia   43  1.70
+quinn hernandez25  2.58
+calvin nixon   33  1.01
+yuri xylophone 47  1.36
+ulysses steinbeck  63  1.05
+jessica nixon  25  2.13
+bob johnson53  3.31
+jessica ichabod56  2.21
+zach miller63  3.87
+priscilla white66  2.82
+ulysses allen  21  1.68
+katie falkner  47  1.49
+tom king   51  1.91
+bob laertes60  3.33
+luke nixon 27  3.54
+quinn johnson  42  2.24
+wendy quirinius71  0.10
+victor polk55  3.63
+rachel robinson32  1.11
+sarah king 57  1.37
+victor young   38  1.72
+priscilla steinbeck38  2.11
+fred brown 19  2.72
+xavier underhill   55  3.56
+irene ovid 67  3.80
+calvin brown   37  2.22
+katie thompson 20  3.27
+katie carson   66  3.55
+tom miller 57  2.83
+rachel brown   56  0.74
+holly johnson  
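
The patch above makes CREATE TEMPORARY TABLE ... LIKE inherit the source table's output format, so a follow-up INSERT no longer trips a format mismatch. A minimal sketch of the repaired flow over JDBC, modeled on the new cttl.q test; the endpoint and the students table (loaded from students.txt above) are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CttlSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default", "hive", "");
         Statement stmt = conn.createStatement()) {
      // Temporary table now copies the storage descriptor of the source.
      stmt.execute("CREATE TEMPORARY TABLE students_tmp LIKE students");
      // Before HIVE-19577 this INSERT could fail with an output format mismatch.
      stmt.execute("INSERT INTO students_tmp SELECT * FROM students");
    }
  }
}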

hive git commit: HIVE-18748: Rename table impacts the ACID behavior as table names are not updated in meta-tables. (Eugene Koifman, reviewed by Sankar Hariappan)

2018-05-29 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/branch-3 bf951b7e5 -> c7f713d57


HIVE-18748: Rename table impacts the ACID behavior as table names are not 
updated in meta-tables. (Eugene Koifman, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c7f713d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c7f713d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c7f713d5

Branch: refs/heads/branch-3
Commit: c7f713d572ce95d7bbc98892190c1b5ebf2627f3
Parents: bf951b7
Author: Eugene Koifman 
Authored: Tue May 29 10:45:53 2018 -0700
Committer: Eugene Koifman 
Committed: Tue May 29 10:45:53 2018 -0700

--
 .../hadoop/hive/ql/TestTxnConcatenate.java  |  61 +++
 .../hive/metastore/AcidEventListener.java   |  51 ++
 .../hadoop/hive/metastore/HiveAlterHandler.java |   9 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 177 +++
 .../hadoop/hive/metastore/txn/TxnStore.java |   5 +
 5 files changed, 303 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c7f713d5/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
index 2663fec..511198a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
@@ -21,8 +21,11 @@ package org.apache.hadoop.hive.ql;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -174,4 +177,62 @@ public class TestTxnConcatenate extends 
TxnCommandsBaseForTests {
 "t/base_002/00_0"}};
 checkResult(expected2, testQuery, false, "check data after concatenate", 
LOG);
   }
+  @Test
+  public void testRenameTable() throws Exception {
+MetastoreConf.setBoolVar(hiveConf, 
MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true);
+runStatementOnDriver("drop database if exists mydb1 cascade");
+runStatementOnDriver("drop database if exists mydb2 cascade");
+runStatementOnDriver("create database mydb1");
+runStatementOnDriver("create database mydb2");
+runStatementOnDriver("create table mydb1.T(a int, b int) stored as orc");
+runStatementOnDriver("insert into mydb1.T values(1,2),(4,5)");
+//put something in WRITE_SET
+runStatementOnDriver("update mydb1.T set b = 6 where b = 5");
+runStatementOnDriver("alter table mydb1.T compact 'minor'");
+
+runStatementOnDriver("alter table mydb1.T RENAME TO mydb1.S");
+
+String testQuery = "select ROW__ID, a, b, INPUT__FILE__NAME from mydb1.S";
+String[][] expected = new String[][] {
+{"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2",
+"s/delta_001_001_/bucket_0"},
+{"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t4\t6",
+"s/delta_002_002_/bucket_0"}};
+checkResult(expected, testQuery, false, "check data", LOG);
+
+
+Assert.assertEquals(0, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPLETED_TXN_COMPONENTS where CTC_TABLE='t'"));
+Assert.assertEquals(0, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPACTION_QUEUE where CQ_TABLE='t'"));
+Assert.assertEquals(0, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from WRITE_SET where WS_TABLE='t'"));
+Assert.assertEquals(0, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='t'"));
+Assert.assertEquals(0, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from NEXT_WRITE_ID where NWI_TABLE='t'"));
+
+Assert.assertEquals(
+TxnDbUtil.queryToString(hiveConf, "select * from 
COMPLETED_TXN_COMPONENTS"), 2,
+TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPLETED_TXN_COMPONENTS where 
CTC_TABLE='s'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from COMPACTION_QUEUE where CQ_TABLE='s'"));
+Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+"select count(*) from WRITE_SET where WS_TABLE='s'"));
+Assert.assertEquals(2, 
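
The AcidEventListener and TxnHandler changes above rewrite table names in the ACID meta-tables on rename, which the new test verifies by counting stale rows. A minimal sketch of that style of check, run directly against the metastore RDBMS; the embedded-Derby connection details are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class RenameMetaCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical embedded-Derby metastore, as used by the test harness.
    try (Connection conn = DriverManager.getConnection("jdbc:derby:metastore_db");
         Statement stmt = conn.createStatement();
         // After ALTER TABLE mydb1.T RENAME TO mydb1.S, no row should still say 't'.
         ResultSet rs = stmt.executeQuery(
             "SELECT COUNT(*) FROM COMPLETED_TXN_COMPONENTS WHERE CTC_TABLE = 't'")) {
      rs.next();
      System.out.println("stale rows: " + rs.getLong(1));  // expected: 0
    }
  }
}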

hive git commit: HIVE-19716: Set spark.local.dir for a few more HoS integration tests (Sahil Takiar, reviewed by Peter Vary)

2018-05-29 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 47e85b73d -> 068d007b8


HIVE-19716: Set spark.local.dir for a few more HoS integration tests (Sahil 
Takiar, reviewed by Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/068d007b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/068d007b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/068d007b

Branch: refs/heads/master
Commit: 068d007b8874f9eecbf30bb2739831ad9114107a
Parents: 47e85b7
Author: Sahil Takiar 
Authored: Fri May 25 11:52:18 2018 -0700
Committer: Sahil Takiar 
Committed: Tue May 29 12:05:38 2018 -0500

--
 .../hive/ql/exec/spark/TestSparkStatistics.java |  3 +++
 .../jdbc/TestJdbcWithLocalClusterSpark.java |  3 +++
 ...stMultiSessionsHS2WithLocalClusterSpark.java |  3 +++
 .../session/TestSparkSessionManagerImpl.java| 24 
 .../hive/spark/client/TestSparkClient.java  |  4 
 5 files changed, 27 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/068d007b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
index be3b501..4413161 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -47,6 +48,8 @@ public class TestSparkStatistics {
 conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
 conf.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "spark");
 conf.set("spark.master", "local-cluster[1,2,1024]");
+conf.set("spark.local.dir", Paths.get(System.getProperty("test.tmp.dir"),
+"TestSparkStatistics-local-dir").toString());
 
 SessionState.start(conf);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/068d007b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
index 2a4da20..fe8a32f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithLocalClusterSpark.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.nio.file.Paths;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -76,6 +77,8 @@ public class TestJdbcWithLocalClusterSpark {
 // Spark requires Hive to support Hadoop3 first then Spark can start
 // working on Hadoop3 support. Remove this after Spark supports Hadoop3.
 conf.set("dfs.client.datanode-restart.timeout", "30");
+conf.set("spark.local.dir", Paths.get(System.getProperty("test.tmp.dir"),
+"TestJdbcWithLocalClusterSpark-local-dir").toString());
 return conf;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/068d007b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
index 9f72e51..79d56f5 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestMultiSessionsHS2WithLocalClusterSpark.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.jdbc;
 
+import java.nio.file.Paths;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -85,6 +86,8 @@ public class TestMultiSessionsHS2WithLocalClusterSpark {
 // Spark requires Hive to support Hadoop3 first then Spark can start
 // working on Hadoop3 support. Remove this after Spark supports Hadoop3.
 conf.set("dfs.client.datanode-restart.timeout", "30");
+
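
The tests in this commit all get the same fix: spark.local.dir is pointed at a
directory unique to the test class and rooted under test.tmp.dir, instead of
letting Spark default to /tmp (the hunk for the third test is cut off above).
A minimal sketch of the pattern; the helper name and testName parameter are
illustrative, not from the commit:

import java.nio.file.Paths;
import org.apache.hadoop.hive.conf.HiveConf;

// Give each test class its own Spark scratch directory so concurrent test
// runs on the same machine do not collide.
static void setSparkLocalDir(HiveConf conf, String testName) {
  conf.set("spark.local.dir", Paths.get(
      System.getProperty("test.tmp.dir"), testName + "-local-dir").toString());
}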

hive git commit: HIVE-19731: Change staging tmp directory used by TestHCatLoaderComplexSchema (Jesus Camacho Rodriguez, reviewed by Thejas M Nair)

2018-05-29 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-3 603b0f64c -> bf951b7e5


HIVE-19731: Change staging tmp directory used by TestHCatLoaderComplexSchema (Jesus Camacho Rodriguez, reviewed by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bf951b7e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bf951b7e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bf951b7e

Branch: refs/heads/branch-3
Commit: bf951b7e57c692d89f2ff5b8ad7f9fbf0fde3e8c
Parents: 603b0f6
Author: Jesus Camacho Rodriguez 
Authored: Tue May 29 08:54:14 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Tue May 29 09:21:23 2018 -0700

--
 .../hive/hcatalog/pig/TestHCatLoaderComplexSchema.java  | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bf951b7e/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
--
diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
index 9cb1477..eedd722 100644
--- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
+++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeTrue;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -32,6 +33,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverFactory;
@@ -101,6 +103,16 @@ public class TestHCatLoaderComplexSchema {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class);
+Path workDir = new Path(System.getProperty("test.tmp.dir",
+"target" + File.separator + "test" + File.separator + "tmp"));
+hiveConf.set("mapred.local.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "local");
+hiveConf.set("mapred.system.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "system");
+hiveConf.set("mapreduce.jobtracker.staging.root.dir", workDir + 
File.separator + "TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "staging");
+hiveConf.set("mapred.temp.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "temp");
 hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
 hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
 hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");



hive git commit: HIVE-19731: Change staging tmp directory used by TestHCatLoaderComplexSchema (Jesus Camacho Rodriguez, reviewed by Thejas M Nair)

2018-05-29 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 99ed2bcbc -> 47e85b73d


HIVE-19731: Change staging tmp directory used by TestHCatLoaderComplexSchema (Jesus Camacho Rodriguez, reviewed by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/47e85b73
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/47e85b73
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/47e85b73

Branch: refs/heads/master
Commit: 47e85b73d7faa550f9ff108b5cb65b8088a36db4
Parents: 99ed2bc
Author: Jesus Camacho Rodriguez 
Authored: Tue May 29 08:54:14 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Tue May 29 09:19:31 2018 -0700

--
 .../hive/hcatalog/pig/TestHCatLoaderComplexSchema.java  | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/47e85b73/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
--
diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
index 9cb1477..eedd722 100644
--- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
+++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeTrue;
 
+import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -32,6 +33,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.DriverFactory;
@@ -101,6 +103,16 @@ public class TestHCatLoaderComplexSchema {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class);
+Path workDir = new Path(System.getProperty("test.tmp.dir",
+"target" + File.separator + "test" + File.separator + "tmp"));
+hiveConf.set("mapred.local.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "local");
+hiveConf.set("mapred.system.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "system");
+hiveConf.set("mapreduce.jobtracker.staging.root.dir", workDir + 
File.separator + "TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "staging");
+hiveConf.set("mapred.temp.dir", workDir + File.separator + 
"TestHCatLoaderComplexSchema"
++ File.separator + "mapred" + File.separator + "temp");
 hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
 hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
 hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");



hive git commit: HIVE-19460: Improve stats estimations for NOT IN operator (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2018-05-29 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/branch-3 32888e82c -> 603b0f64c


HIVE-19460: Improve stats estimations for NOT IN operator (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/603b0f64
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/603b0f64
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/603b0f64

Branch: refs/heads/branch-3
Commit: 603b0f64cb95dc381a9fa050c9d25ba4d709166d
Parents: 32888e8
Author: Zoltan Haindrich 
Authored: Tue May 29 12:48:53 2018 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 29 12:48:53 2018 +0200

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../stats/annotation/StatsRulesProcFactory.java | 158 ++-
 .../hadoop/hive/ql/plan/ColStatistics.java  |   1 -
 .../ql/plan/mapping/TestStatEstimations.java| 113 +
 .../clientpositive/llap/acid_no_buckets.q.out   |  20 +--
 .../clientpositive/llap/explainuser_2.q.out |  26 +--
 .../clientpositive/llap/vector_between_in.q.out |  14 +-
 .../clientpositive/llap/vector_struct_in.q.out  |   6 +-
 .../clientpositive/llap/vectorization_0.q.out   |  16 +-
 9 files changed, 312 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/603b0f64/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 72336ab..66c2831 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2373,6 +2373,8 @@ public class HiveConf extends Configuration {
 "in the number of rows filtered by a certain operator, which in turn 
might lead to overprovision or\n" +
 "underprovision of resources. This factor is applied to the 
cardinality estimation of IN clauses in\n" +
 "filter operators."),
+HIVE_STATS_IN_MIN_RATIO("hive.stats.filter.in.min.ratio", (float) 0.05,
+"Output estimation of an IN filter can't be lower than this ratio"),
 // Concurrency
 HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
 "Whether Hive supports concurrency control or not. \n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/603b0f64/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 91cccfb..d0be33b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hive.ql.optimizer.stats.annotation;
 
 import java.lang.reflect.Field;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -30,7 +30,6 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.Set;
 import java.util.Stack;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.Context;
@@ -60,6 +59,7 @@ import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
+import org.apache.hadoop.hive.ql.plan.ColStatistics.Range;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -494,6 +494,19 @@ public class StatsRulesProcFactory {
 }
   }
 
+  boolean allColsFilteredByStats = true;
+  for (int i = 0; i < columnStats.size(); i++) {
+ValuePruner vp = new ValuePruner(columnStats.get(i));
+allColsFilteredByStats &= vp.isValid();
+Set<ExprNodeDescEqualityWrapper> newValues = Sets.newHashSet();
+for (ExprNodeDescEqualityWrapper v : values.get(i)) {
+  if (vp.accept(v)) {
+newValues.add(v);
+  }
+}
+values.set(i, newValues);
+  }
+
   // 3. Calculate IN selectivity
   double factor = 1d;
   for (int i = 0; i < columnStats.size(); i++) {
@@ -503,10 +516,151 @@ public class StatsRulesProcFactory {
 // max can be 
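
The hunk is truncated here, but the new hive.stats.filter.in.min.ratio knob
indicates the shape of the change: the estimated selectivity of an IN filter
is floored so it can never drop below the configured ratio. An illustrative
sketch under that assumption, not the committed code:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

// Floor an IN-filter selectivity estimate at hive.stats.filter.in.min.ratio,
// per the config description: "Output estimation of an IN filter can't be
// lower than this ratio".
static double applyInFilterFloor(HiveConf conf, double estimatedRatio) {
  float minRatio = HiveConf.getFloatVar(conf, ConfVars.HIVE_STATS_IN_MIN_RATIO);
  return Math.max(estimatedRatio, minRatio);
}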

hive git commit: HIVE-19460: Improve stats estimations for NOT IN operator (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2018-05-29 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 9d23f7185 -> 99ed2bcbc


HIVE-19460: Improve stats estimations for NOT IN operator (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/99ed2bcb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/99ed2bcb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/99ed2bcb

Branch: refs/heads/master
Commit: 99ed2bcbcb408cbcd81e77a1ca76c50a3bd43260
Parents: 9d23f71
Author: Zoltan Haindrich 
Authored: Tue May 29 12:47:25 2018 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 29 12:47:25 2018 +0200

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../stats/annotation/StatsRulesProcFactory.java | 158 ++-
 .../hadoop/hive/ql/plan/ColStatistics.java  |   1 -
 .../ql/plan/mapping/TestStatEstimations.java| 113 +
 .../clientpositive/llap/acid_no_buckets.q.out   |  20 +--
 .../clientpositive/llap/explainuser_2.q.out |  26 +--
 .../clientpositive/llap/vector_between_in.q.out |  14 +-
 .../clientpositive/llap/vector_struct_in.q.out  |   6 +-
 .../clientpositive/llap/vectorization_0.q.out   |  16 +-
 9 files changed, 312 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/99ed2bcb/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f48d004..7942608 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2374,6 +2374,8 @@ public class HiveConf extends Configuration {
 "in the number of rows filtered by a certain operator, which in turn 
might lead to overprovision or\n" +
 "underprovision of resources. This factor is applied to the 
cardinality estimation of IN clauses in\n" +
 "filter operators."),
+HIVE_STATS_IN_MIN_RATIO("hive.stats.filter.in.min.ratio", (float) 0.05,
+"Output estimation of an IN filter can't be lower than this ratio"),
 // Concurrency
 HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
 "Whether Hive supports concurrency control or not. \n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/99ed2bcb/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 91cccfb..d0be33b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hive.ql.optimizer.stats.annotation;
 
 import java.lang.reflect.Field;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -30,7 +30,6 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.Set;
 import java.util.Stack;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.Context;
@@ -60,6 +59,7 @@ import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
+import org.apache.hadoop.hive.ql.plan.ColStatistics.Range;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -494,6 +494,19 @@ public class StatsRulesProcFactory {
 }
   }
 
+  boolean allColsFilteredByStats = true;
+  for (int i = 0; i < columnStats.size(); i++) {
+ValuePruner vp = new ValuePruner(columnStats.get(i));
+allColsFilteredByStats &= vp.isValid();
+Set<ExprNodeDescEqualityWrapper> newValues = Sets.newHashSet();
+for (ExprNodeDescEqualityWrapper v : values.get(i)) {
+  if (vp.accept(v)) {
+newValues.add(v);
+  }
+}
+values.set(i, newValues);
+  }
+
   // 3. Calculate IN selectivity
   double factor = 1d;
   for (int i = 0; i < columnStats.size(); i++) {
@@ -503,10 +516,151 @@ public class StatsRulesProcFactory {
 // max can be 1, 
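
The visible part of the hunk prunes IN-list constants per column: a
ValuePruner built from that column's statistics accepts only values the stats
cannot rule out, and allColsFilteredByStats tracks whether usable stats
existed for every column. A self-contained toy version of the range-pruning
idea (not the committed ValuePruner):

import java.util.ArrayList;
import java.util.List;

// Drop IN-list constants that column min/max statistics prove can never
// match, so the selectivity estimate only counts values that could occur.
final class RangePruner {
  private final long min, max;

  RangePruner(long min, long max) { this.min = min; this.max = max; }

  boolean accept(long constant) { return constant >= min && constant <= max; }

  List<Long> prune(List<Long> inList) {
    List<Long> kept = new ArrayList<>();
    for (long v : inList) {
      if (accept(v)) {
        kept.add(v);
      }
    }
    return kept;  // e.g. range [0,50] keeps (10, 20) out of IN (10, 20, 999)
  }
}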

hive git commit: HIVE-19713: itests/hive-jmh should not reference a concrete storage-api version (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2018-05-29 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/branch-3 66e5453df -> 32888e82c


HIVE-19713: itests/hive-jmh should not reference a concrete storage-api version (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/32888e82
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/32888e82
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/32888e82

Branch: refs/heads/branch-3
Commit: 32888e82c272dbc9f3f214807a4a05fad71d93b7
Parents: 66e5453
Author: Zoltan Haindrich 
Authored: Tue May 29 08:26:39 2018 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 29 08:26:39 2018 +0200

--
 itests/hive-jmh/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/32888e82/itests/hive-jmh/pom.xml
--
diff --git a/itests/hive-jmh/pom.xml b/itests/hive-jmh/pom.xml
index 5eb3026..e045ace 100644
--- a/itests/hive-jmh/pom.xml
+++ b/itests/hive-jmh/pom.xml
@@ -66,7 +66,7 @@
 
  <groupId>org.apache.hive</groupId>
  <artifactId>hive-storage-api</artifactId>
-   <version>2.7.0-SNAPSHOT</version>
+   <version>${storage-api.version}</version>
  </dependency>
  <dependency>
  <groupId>org.apache.hive</groupId>



hive git commit: HIVE-19713: itests/hive-jmh should not reference a concrete storage-api version (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2018-05-29 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master 7562cdb71 -> 83e15d824


HIVE-19713: itests/hive-jmh should not reference a concrete storage-api version (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/83e15d82
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/83e15d82
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/83e15d82

Branch: refs/heads/master
Commit: 83e15d8242c3bf265f823b82f81b31987ece6189
Parents: 7562cdb
Author: Zoltan Haindrich 
Authored: Tue May 29 08:19:26 2018 +0200
Committer: Zoltan Haindrich 
Committed: Tue May 29 08:19:26 2018 +0200

--
 itests/hive-jmh/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/83e15d82/itests/hive-jmh/pom.xml
--
diff --git a/itests/hive-jmh/pom.xml b/itests/hive-jmh/pom.xml
index de5e180..0abefdf 100644
--- a/itests/hive-jmh/pom.xml
+++ b/itests/hive-jmh/pom.xml
@@ -66,7 +66,7 @@
 
  <groupId>org.apache.hive</groupId>
  <artifactId>hive-storage-api</artifactId>
-   <version>2.7.0-SNAPSHOT</version>
+   <version>${storage-api.version}</version>
  </dependency>
  <dependency>
  <groupId>org.apache.hive</groupId>