This is an automated email from the ASF dual-hosted git repository.

dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 52cfa6fc345 HIVE-29101: Add test coverage for the standalone jdbc (#5992)
52cfa6fc345 is described below

commit 52cfa6fc3455f736dbbe7140eee92ec3c1db65cd
Author: dengzh <[email protected]>
AuthorDate: Thu Aug 14 08:49:53 2025 +0800

    HIVE-29101: Add test coverage for the standalone jdbc (#5992)
---
 itests/pom.xml                                     |   1 +
 itests/test-jdbc/pom.xml                           |  85 +++++++
 .../org/apache/hive/jdbc/ITAbstractContainer.java  |  38 ++++
 .../java/org/apache/hive/jdbc/ITHiveServer2.java   | 248 +++++++++++++++++++++
 .../java/org/apache/hive/jdbc/ITZKHiveServer2.java | 121 ++++++++++
 .../org/apache/hive/jdbc/TestStandaloneJdbc.java   | 212 ++++++++++++++++++
 itests/test-jdbc/src/test/resources/core-site.xml  |  37 +++
 .../test-jdbc/src/test/resources/custom_hosts_file |   3 +
 itests/test-jdbc/src/test/resources/yarn-site.xml  |  27 +++
 9 files changed, 772 insertions(+)

diff --git a/itests/pom.xml b/itests/pom.xml
index b8b85e189f9..938f5267c9f 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -34,6 +34,7 @@
     <module>hive-unit</module>
     <module>hive-blobstore</module>
     <module>util</module>
+    <module>test-jdbc</module>
     <module>test-serde</module>
     <module>qtest</module>
     <module>qtest-accumulo</module>
diff --git a/itests/test-jdbc/pom.xml b/itests/test-jdbc/pom.xml
new file mode 100644
index 00000000000..8f390563834
--- /dev/null
+++ b/itests/test-jdbc/pom.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hive</groupId>
+    <artifactId>hive-it</artifactId>
+    <version>4.2.0-SNAPSHOT</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+  <artifactId>test-jdbc</artifactId>
+  <packaging>jar</packaging>
+  <name>Hive Integration - Testing standalone jdbc</name>
+  <properties>
+    <hive.path.to.root>../..</hive.path.to.root>
+  </properties>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-jdbc</artifactId>
+      <classifier>standalone</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.testcontainers</groupId>
+      <artifactId>testcontainers</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>setup-test-dirs</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="${test.conf.dir}"/>
+                <mkdir dir="${test.conf.dir}"/>
+                <!-- copies hive-site.xml so it can be modified -->
+                <copy todir="${test.conf.dir}">
+                  <fileset dir="${basedir}/src/test/resources/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <argLine>${maven.test.jvm.args} -Djdk.net.hosts.file=${test.conf.dir}/custom_hosts_file</argLine>
+          <systemPropertyVariables>
+            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
+          </systemPropertyVariables>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
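
Note: the surefire argLine above relies on the JDK 9+ system property
jdk.net.hosts.file, which makes InetAddress resolve host names from the given
hosts-style file instead of the OS resolver; that is how the container host
names listed in custom_hosts_file map to 127.0.0.1 inside the test JVM. A
minimal sketch of the effect (illustrative, not part of this commit):

    import java.net.InetAddress;

    public class HostsFileDemo {
      // run with -Djdk.net.hosts.file=/path/to/custom_hosts_file
      public static void main(String[] args) throws Exception {
        InetAddress addr = InetAddress.getByName("test-standalone-jdbc-kerberos");
        System.out.println(addr.getHostAddress()); // prints 127.0.0.1
      }
    }
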
diff --git a/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITAbstractContainer.java b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITAbstractContainer.java
new file mode 100644
index 00000000000..39502a4324b
--- /dev/null
+++ b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITAbstractContainer.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.jdbc;
+
+public abstract class ITAbstractContainer {
+
+  protected String getZkConnectionUrl() {
+    throw new UnsupportedOperationException("not implemented");
+  }
+
+  protected String getHttpJdbcUrl() {
+    throw new UnsupportedOperationException("not implemented");
+  }
+
+  protected String getBaseJdbcUrl() {
+    throw new UnsupportedOperationException("not implemented");
+  }
+
+  public abstract void start() throws Exception;
+
+  public abstract void stop() throws Exception;
+
+}
diff --git a/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITHiveServer2.java b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITHiveServer2.java
new file mode 100644
index 00000000000..59422e91ca6
--- /dev/null
+++ b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITHiveServer2.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.jdbc;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.testcontainers.containers.BindMode;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy;
+import org.testcontainers.utility.DockerImageName;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class ITHiveServer2 extends ITAbstractContainer {
+  protected File workDir;
+  private final MiniJdbcKdc miniKdc;
+  protected GenericContainer<?> container;
+  protected final String imageName = "apache/hive:4.0.1";
+  private int[] realmPorts;
+
+  public ITHiveServer2() {
+    this.miniKdc = null;
+  }
+
+  public ITHiveServer2(File workDir) throws Exception {
+    this.workDir = workDir;
+    this.miniKdc = new MiniJdbcKdc(this.workDir);
+    String principal = MiniJdbcKdc.HIVE_TEST_USER_1;
+    Configuration configuration = new Configuration();
+    configuration.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(configuration);
+    UserGroupInformation.loginUserFromKeytab(principal, miniKdc.getKeyTabFile(principal));
+    assertEquals(principal, UserGroupInformation.getLoginUser().getShortUserName());
+  }
+
+  protected Map<String, String> prepareEnvArgs() {
+    Map<String, String> envArgs = new HashMap<>();
+    envArgs.put("SERVICE_NAME", "hiveserver2");
+    envArgs.put("HIVE_SERVER2_TRANSPORT_MODE", "all");
+    List<String> properties = new ArrayList<>();
+    properties.add("hive.server2.authentication=KERBEROS");
+    properties.add("hive.server2.authentication.kerberos.principal=hive/_HOST@" + miniKdc.getRealm());
+    properties.add("hive.server2.authentication.kerberos.keytab=/opt/hive/conf/hive.keytab");
+    StringBuilder builder = new StringBuilder();
+    for (String prop : properties) {
+      builder.append("-D").append(prop).append(" ");
+    }
+    envArgs.put("SERVICE_OPTS", builder.toString());
+    return envArgs;
+  }
+
+  private Map<String, String> prepareFiles() {
+    Map<String, String> boundFiles = new HashMap<>();
+    boundFiles.put(createNewKrbConf().getPath(), "/etc/krb5.conf");
+    boundFiles.put(miniKdc.getKeyTabFile(miniKdc.getServicePrincipalForUser("hive")), "/opt/hive/conf/hive.keytab");
+    boundFiles.put(ITHiveServer2.class.getClassLoader().getResource("core-site.xml").getPath(),
+        "/opt/hive/conf/core-site.xml");
+    // java.io.IOException: Can't get Master Kerberos principal for use as renewer if the yarn-site is not present
+    boundFiles.put(ITHiveServer2.class.getClassLoader().getResource("yarn-site.xml").getPath(),
+        "/opt/hive/conf/yarn-site.xml");
+    return boundFiles;
+  }
+
+  @Override
+  public void start() {
+    container = new GenericContainer<>(DockerImageName.parse(imageName))
+        .withEnv(prepareEnvArgs())
+        .waitingFor(new AbstractWaitStrategy() {
+          @Override
+          protected void waitUntilReady() {
+            long timeout = TimeUnit.MINUTES.toMillis(15);
+            long start;
+            do {
+              start = System.currentTimeMillis();
+              try (Connection conn = DriverManager.getConnection(getBaseJdbcUrl())) {
+                break;
+              } catch (Exception e) {
+                try {
+                  Thread.sleep(10 * 1000);
+                } catch (InterruptedException ex) {
+                  break;
+                }
+              }
+            } while ((timeout += start - System.currentTimeMillis()) > 0);
+          }
+        });
+    beforeStart(container);
+    container.start();
+  }
+
+  protected void beforeStart(GenericContainer<?> container) {
+    Map<String, String> boundFiles = prepareFiles();
+    for (Map.Entry<String, String> entry : boundFiles.entrySet()) {
+      container.withFileSystemBind(entry.getKey(), entry.getValue(), BindMode.READ_ONLY);
+    }
+    container
+        .withCreateContainerCmdModifier(it -> it.withHostName(MiniJdbcKdc.HOST))
+        .withExposedPorts(10000, 10001);
+    if (realmPorts != null) {
+      org.testcontainers.Testcontainers.exposeHostPorts(realmPorts);
+    }
+  }
+
+  private File createNewKrbConf() {
+    File krb5 = miniKdc.getKrb5conf();
+    File newKrb5 = new File(workDir, krb5.getName() + "_new");
+    try (BufferedReader reader = new BufferedReader(new FileReader(krb5));
+         FileWriter writer = new FileWriter(newKrb5, false)) {
+      String line;
+      List<Integer> hostPorts = new ArrayList<>();
+      String localhost = "localhost:";
+      while ((line = reader.readLine()) != null) {
+        if (line.contains(localhost)) {
+          hostPorts.add(Integer.valueOf(line.split(localhost)[1]));
+          line = line.replace("localhost", "host.testcontainers.internal");
+        }
+        writer.write(line);
+        writer.write(System.lineSeparator());
+      }
+      this.realmPorts = new int[hostPorts.size()];
+      for (int i = 0; i < hostPorts.size(); i++) {
+        realmPorts[i] = hostPorts.get(i);
+      }
+      writer.flush();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return newKrb5;
+  }
+
+  @Override
+  public void stop() throws Exception {
+    try {
+      miniKdc.stop();
+    } finally {
+      if (container != null) {
+        container.stop();
+      }
+    }
+  }
+
+  @Override
+  protected String getHttpJdbcUrl() {
+    return "jdbc:hive2://" + MiniJdbcKdc.HOST + ":" + 
container.getMappedPort(10001) +
+        "/;principal=" + miniKdc.getServicePrincipalForUser("hive") + "@" + 
miniKdc.getRealm() +
+        ";transportMode=http;httpPath=cliservice";
+  }
+
+  @Override
+  protected String getBaseJdbcUrl() {
+    return "jdbc:hive2://" + MiniJdbcKdc.HOST + ":" + 
container.getMappedPort(10000) + "/;principal=" +
+        miniKdc.getServicePrincipalForUser("hive") + "@" + miniKdc.getRealm();
+  }
+
+  public static class MiniJdbcKdc {
+    protected static String HOST = "test-standalone-jdbc-kerberos";
+    public static String HIVE_SERVICE_PRINCIPAL = "hive";
+    public static String YARN_SERVICE_PRINCIPAL = "yarn";
+    public static String HIVE_TEST_USER_1 = "user1";
+    public static String HIVE_TEST_USER_2 = "user2";
+    public static String HIVE_TEST_SUPER_USER = "superuser";
+    private static int keyTabCounter = 1;
+    private final MiniKdc miniKdc;
+    private final Properties kdcConf = MiniKdc.createConf();
+    private final Map<String, String> userPrincipals =
+        new HashMap<String, String>();
+    private final File workDir;
+
+    public MiniJdbcKdc(File workDir) throws Exception {
+      this.workDir = workDir;
+      /**
+       *  Hadoop security classes read the default realm via static initialization,
+       *  before miniKdc is initialized. Hence we set the realm via a test configuration
+       *  and propagate that to miniKdc.
+       */
+      assertNotNull("java.security.krb5.conf is needed for hadoop security",
+          System.getProperty("java.security.krb5.conf"));
+      System.clearProperty("java.security.krb5.conf");
+
+      miniKdc = new MiniKdc(kdcConf, new File(workDir, "miniKdc"));
+      miniKdc.start();
+
+      // create default users
+      addUserPrincipal(getServicePrincipalForUser(HIVE_SERVICE_PRINCIPAL));
+      addUserPrincipal(getServicePrincipalForUser(YARN_SERVICE_PRINCIPAL));
+      addUserPrincipal(HIVE_TEST_USER_1);
+      addUserPrincipal(HIVE_TEST_USER_2);
+      addUserPrincipal(HIVE_TEST_SUPER_USER);
+    }
+
+    public void addUserPrincipal(String principal) throws Exception {
+      File keytab = new File(workDir, "miniKdc" + keyTabCounter++ + ".keytab");
+      miniKdc.createPrincipal(keytab, principal);
+      userPrincipals.put(principal, keytab.getPath());
+    }
+
+    public String getServicePrincipalForUser(String shortUserName) {
+      return shortUserName + "/" + HOST;
+    }
+
+    public String getKeyTabFile(String principalName) {
+      return userPrincipals.get(principalName);
+    }
+
+    public File getKrb5conf() {
+      return miniKdc.getKrb5conf();
+    }
+
+    public String getRealm() {
+      return miniKdc.getRealm();
+    }
+
+    public void stop() {
+      miniKdc.stop();
+    }
+  }
+}
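
For reference, the Kerberos URL shape produced by getBaseJdbcUrl() above can
be used directly once the client already holds a ticket (for example via
UserGroupInformation.loginUserFromKeytab, as in the ITHiveServer2
constructor). A hedged sketch; the realm and port below are illustrative,
since MiniKdc generates the realm and Testcontainers maps the port at runtime:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class KerberosJdbcSketch {
      public static void main(String[] args) throws Exception {
        // principal= tells the driver which server principal to expect
        String url = "jdbc:hive2://test-standalone-jdbc-kerberos:10000/"
            + ";principal=hive/test-standalone-jdbc-kerberos@EXAMPLE.COM";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show databases")) {
          while (rs.next()) {
            System.out.println(rs.getString(1)); // e.g. "default"
          }
        }
      }
    }
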
diff --git a/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITZKHiveServer2.java b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITZKHiveServer2.java
new file mode 100644
index 00000000000..51ca68aa3f0
--- /dev/null
+++ b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/ITZKHiveServer2.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.jdbc;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.Network;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
+
+public class ITZKHiveServer2 extends ITHiveServer2 {
+  private final String hostName = "test-standalone-jdbc-plain";
+  private final ITZookeeper zookeeper;
+  public ITZKHiveServer2(File workDir) throws Exception {
+    super();
+    zookeeper = new ITZookeeper();
+    zookeeper.start();
+  }
+
+  @Override
+  protected Map<String, String> prepareEnvArgs() {
+    Map<String, String> envArgs = new HashMap<>();
+    envArgs.put("SERVICE_NAME", "hiveserver2");
+    envArgs.put("HIVE_SERVER2_TRANSPORT_MODE", "all");
+    List<String> properties = new ArrayList<>();
+    properties.add("hive.zookeeper.quorum=zookeeper");
+    properties.add("hive.server2.support.dynamic.service.discovery=true");
+    properties.add("hive.server2.thrift.port=10010");
+    properties.add("hive.server2.thrift.http.port=10011");
+    StringBuilder builder = new StringBuilder();
+    for (String prop : properties) {
+      builder.append("-D").append(prop).append(" ");
+    }
+    envArgs.put("SERVICE_OPTS", builder.toString());
+    return envArgs;
+  }
+
+  @Override
+  protected void beforeStart(GenericContainer<?> container) {
+    container
+        .withNetwork(zookeeper.network)
+        .withCreateContainerCmdModifier(it -> it.withHostName(hostName))
+        .setPortBindings(List.of("10010:10010", "10011:10011"));
+  }
+
+  @Override
+  protected String getHttpJdbcUrl() {
+    return "jdbc:hive2://" + hostName + ":" + container.getMappedPort(10011) + 
"/;" +
+        "transportMode=http;httpPath=cliservice";
+  }
+
+  @Override
+  protected String getBaseJdbcUrl() {
+    return "jdbc:hive2://" + hostName + ":" + container.getMappedPort(10010) + 
"/";
+  }
+
+  @Override
+  protected String getZkConnectionUrl() {
+    return "jdbc:hive2://" + hostName + ":" + zookeeper.getListeningPort() + 
"/default;" +
+        "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
+  }
+
+  @Override
+  public void stop() throws Exception {
+    try {
+      super.stop();
+    } finally {
+      zookeeper.stop();
+    }
+  }
+
+  private static class ITZookeeper extends ITAbstractContainer {
+    Network network = Network.newNetwork();
+    GenericContainer<?> zookeeper;
+
+    @Override
+    public void start() throws Exception {
+      zookeeper = new GenericContainer<>(DockerImageName.parse("zookeeper:3.8.4"))
+          .withNetwork(network)
+          .withNetworkAliases("zookeeper")
+          .withExposedPorts(2181)
+          .waitingFor(Wait.forLogMessage(".*binding to port.*2181.*\\n", 1));
+      zookeeper.start();
+    }
+
+    @Override
+    public void stop() throws Exception {
+      try {
+        network.close();
+      } finally {
+        if (zookeeper != null) {
+          zookeeper.stop();
+        }
+      }
+    }
+
+    public int getListeningPort() {
+      return zookeeper.getMappedPort(2181);
+    }
+  }
+}
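
For reference, the URL built by getZkConnectionUrl() points the driver at
ZooKeeper rather than a fixed HiveServer2 host; with
serviceDiscoveryMode=zooKeeper the driver resolves a live server's host and
port from the hiveserver2 namespace. A minimal sketch (host and port are
illustrative; the tests use the port mapped for 2181 at runtime):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ZkDiscoverySketch {
      public static void main(String[] args) throws Exception {
        String zkUrl = "jdbc:hive2://test-standalone-jdbc-plain:2181/default;"
            + "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
        // the driver picks an actual HiveServer2 host:port out of ZooKeeper
        try (Connection conn = DriverManager.getConnection(zkUrl);
             Statement stmt = conn.createStatement()) {
          stmt.execute("select 1");
        }
      }
    }
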
diff --git a/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/TestStandaloneJdbc.java b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/TestStandaloneJdbc.java
new file mode 100644
index 00000000000..8f760b015c7
--- /dev/null
+++ b/itests/test-jdbc/src/test/java/org/apache/hive/jdbc/TestStandaloneJdbc.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.jdbc;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.lang.reflect.Field;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestStandaloneJdbc {
+  private static File workDir = new File(System.getProperty("java.io.tmpdir"), 
"test-it-standalone-jdbc");
+  private static ITAbstractContainer HS2;
+  private static ITAbstractContainer ZK_HS2;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    workDir.mkdirs();
+    HS2 = new ITHiveServer2(workDir);
+    ZK_HS2 = new ITZKHiveServer2(workDir);
+
+    HS2.start();
+    ZK_HS2.start();
+  }
+
+  @Test
+  public void testBinaryJdbc() throws Exception {
+    testMetaOp(HS2.getBaseJdbcUrl(), true);
+    testDataSourceOp(HS2.getBaseJdbcUrl());
+    testNegativeJdbc(HS2.getBaseJdbcUrl());
+  }
+
+  private void testMetaOp(String url, boolean kerberos) throws Exception {
+    try (Connection con = DriverManager.getConnection(url)) {
+      try (Statement stmt = con.createStatement()) {
+        String tableName = "testHiveDriverTable1";
+        stmt.execute("drop table if exists " + tableName);
+        stmt.execute("create table " + tableName + " (key string, value 
string)");
+        // show tables
+        String sql = "show tables '" + tableName + "'";
+        System.out.println("Running: " + sql);
+        try (ResultSet res = stmt.executeQuery(sql)) {
+          ResultSetMetaData metaData = res.getMetaData();
+          assertTrue(metaData.getColumnCount() > 0);
+          assertFalse(metaData.getColumnName(1).isEmpty());
+          assertTrue(metaData.getColumnType(1) > 0);
+          assertTrue(res.next());
+          assertFalse(res.getString(1).trim().isEmpty());
+        }
+        // describe table
+        sql = "describe " + tableName;
+        System.out.println("Running: " + sql);
+        try (ResultSet res = stmt.executeQuery(sql)) {
+          assertTrue(res.next());
+          assertFalse(res.getString(1).trim().isEmpty());
+          assertFalse(res.getString(2).trim().isEmpty());
+        }
+
+        // "Client cannot authenticate via:[TOKEN, KERBEROS]" in running the 
Kerberized tez local mode
+        if (!kerberos) {
+          String values = "('a','b'),('c','d'),('e','f'),('g','h')";
+          stmt.execute("insert into table " + tableName + " values" + values);
+          // select * query
+          sql = "select * from " + tableName;
+          System.out.println("Running: " + sql);
+          try (ResultSet res = stmt.executeQuery(sql)) {
+            List<String> list = new ArrayList<>();
+            while (res.next()) {
+              list.add("('" + res.getString(1) + "','" + res.getString(2) + 
"')");
+            }
+            assertEquals(values, list.stream().collect(Collectors.joining(",")));
+          }
+
+          // regular hive query
+          sql = "select count(1) from " + tableName;
+          System.out.println("Running: " + sql);
+          try (ResultSet res = stmt.executeQuery(sql)) {
+            assertTrue(res.next());
+            assertEquals(4, res.getInt(1));
+          }
+        }
+      }
+    }
+  }
+
+  private void testDataSourceOp(String url) throws Exception {
+    try (Connection con = DriverManager.getConnection(url)) {
+      DatabaseMetaData metaData = con.getMetaData();
+
+      try (ResultSet rs = metaData.getSchemas()) {
+        assertTrue(rs.next());
+        assertFalse(rs.getString(1).trim().isEmpty());
+      }
+
+      try (ResultSet resultSet = metaData.getColumns("hive", ".*", ".*", 
".*")) {
+        assertTrue(resultSet.next());
+        assertFalse(resultSet.getString(3).trim().isEmpty());
+      }
+
+      try (ResultSet resultSet = metaData.getTypeInfo()) {
+        assertTrue(resultSet.next());
+        assertFalse(resultSet.getString(1).trim().isEmpty());
+      }
+    }
+  }
+
+  @Test
+  public void testHttpJdbc() throws Exception {
+    testMetaOp(HS2.getHttpJdbcUrl(), true);
+    testMetaOp(ZK_HS2.getHttpJdbcUrl(), false);
+    testDataSourceOp(HS2.getHttpJdbcUrl());
+    testNegativeJdbc(HS2.getHttpJdbcUrl());
+  }
+
+  private void testNegativeJdbc(String url) {
+    try (Connection con = DriverManager.getConnection(url);
+         Statement stmt = con.createStatement()) {
+      stmt.execute("insert into table this_is_not_exist_table values 
(1),(3),(5)");
+      fail("A SQLException is expected");
+    } catch (SQLException e) {
+      assertTrue(e.getMessage().contains("Table not found"));
+    }
+  }
+
+  @Test
+  public void testZookeeperJdbc() throws Exception {
+    testMetaOp(ZK_HS2.getZkConnectionUrl(), false);
+    testDataSourceOp(ZK_HS2.getZkConnectionUrl());
+    testNegativeJdbc(ZK_HS2.getZkConnectionUrl());
+  }
+
+  @Test
+  public void testTokenAuthentication() throws Exception {
+    String url = HS2.getBaseJdbcUrl();
+    String dt;
+    try (HiveConnection con = (HiveConnection) DriverManager.getConnection(url)) {
+      dt = con.getDelegationToken(ITHiveServer2.MiniJdbcKdc.HIVE_TEST_USER_1, "hive");
+    }
+    File dtFile = new File(workDir, "delegation-token-file");
+    try (FileOutputStream os = new FileOutputStream(dtFile)) {
+      os.write(dt.getBytes());
+    }
+    SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), dt, "hiveserver2ClientToken");
+    // also expose the token via HADOOP_TOKEN_FILE_LOCATION so the Hadoop client can pick it up
+    updateEnv("HADOOP_TOKEN_FILE_LOCATION", dtFile.getPath());
+    url += ";auth=delegationToken";
+    testMetaOp(url, true);
+  }
+
+  private void updateEnv(String name, String val) throws ReflectiveOperationException {
+    Map<String, String> env = System.getenv();
+    Field field = env.getClass().getDeclaredField("m");
+    field.setAccessible(true);
+    ((Map<String, String>) field.get(env)).put(name, val);
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+    try {
+      workDir.delete();
+    } finally {
+      killContainer(ZK_HS2, HS2);
+    }
+  }
+
+  private static void killContainer(ITAbstractContainer... containers) {
+    for (ITAbstractContainer container : containers) {
+      try {
+        if (container != null) {
+          container.stop();
+        }
+      } catch (Exception e) {
+        // ignore this exception
+      }
+    }
+  }
+}
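
Two notes on the test above. First, testTokenAuthentication exercises the
delegation-token flow: authenticate once with Kerberos, obtain a token via
HiveConnection#getDelegationToken(owner, renewer), register it with the UGI
and HADOOP_TOKEN_FILE_LOCATION, then reconnect with ;auth=delegationToken and
no Kerberos credentials. Second, the updateEnv helper mutates the JVM's
cached environment map reflectively; on JDK 16+ that requires java.util to be
opened to the test code, presumably supplied through the inherited
maven.test.jvm.args (an assumption; the flag below is illustrative):

    --add-opens java.base/java.util=ALL-UNNAMED
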
diff --git a/itests/test-jdbc/src/test/resources/core-site.xml b/itests/test-jdbc/src/test/resources/core-site.xml
new file mode 100644
index 00000000000..625b34460db
--- /dev/null
+++ b/itests/test-jdbc/src/test/resources/core-site.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>kerberos</value>
+  </property>
+
+  <property>
+    <name>hadoop.proxyuser.hive.groups</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>hadoop.proxyuser.hive.hosts</name>
+    <value>*</value>
+  </property>
+
+</configuration>
diff --git a/itests/test-jdbc/src/test/resources/custom_hosts_file b/itests/test-jdbc/src/test/resources/custom_hosts_file
new file mode 100644
index 00000000000..2073b5cb1a8
--- /dev/null
+++ b/itests/test-jdbc/src/test/resources/custom_hosts_file
@@ -0,0 +1,3 @@
+127.0.0.1       localhost
+127.0.0.1       test-standalone-jdbc-plain
+127.0.0.1       test-standalone-jdbc-kerberos
\ No newline at end of file
diff --git a/itests/test-jdbc/src/test/resources/yarn-site.xml b/itests/test-jdbc/src/test/resources/yarn-site.xml
new file mode 100644
index 00000000000..36bee6298bb
--- /dev/null
+++ b/itests/test-jdbc/src/test/resources/yarn-site.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.resourcemanager.principal</name>
+    <value>yarn/[email protected]</value>
+  </property>
+
+</configuration>
