HIVE-18495: JUnit rule to enable Driver level testing (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich <k...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/20ce1c61
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/20ce1c61
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/20ce1c61

Branch: refs/heads/master
Commit: 20ce1c61c59c86c9d1bc91e5ef92ccc92dbae0a5
Parents: 27ea7cc
Author: Zoltan Haindrich <k...@rxd.hu>
Authored: Mon Jan 29 08:17:21 2018 +0100
Committer: Zoltan Haindrich <k...@rxd.hu>
Committed: Mon Jan 29 08:17:21 2018 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../hive/cli/control/AbstractCliConfig.java     |  33 +-
 .../control/AbstractCoreBlobstoreCliDriver.java |   5 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |  12 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |  52 ++-
 .../hive/ql/exec/tez/HivePreWarmProcessor.java  |   2 +-
 .../hive/ql/exec/tez/TezSessionState.java       |  35 +-
 .../ql/udf/generic/GenericUDTFGetSplits.java    |   2 +-
 .../apache/hive/testutils/HiveTestEnvSetup.java | 339 ++++++++++++++
 .../hive/testutils/MiniZooKeeperCluster.java    | 456 +++++++++++++++++++
 .../hive/testutils/TestHiveTestEnvSetup.java    | 102 +++++
 11 files changed, 977 insertions(+), 63 deletions(-)
----------------------------------------------------------------------
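The new rule is meant to be attached as a @ClassRule together with a per-method
rule, as the TestHiveTestEnvSetup testcase added below does. A minimal sketch of
the intended usage (class name and query are illustrative only):

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.DriverFactory;
    import org.apache.hadoop.hive.ql.IDriver;
    import org.apache.hadoop.hive.ql.session.SessionState;
    import org.apache.hive.testutils.HiveTestEnvSetup;
    import org.junit.ClassRule;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TestRule;

    public class MyDriverLevelTest {
      @ClassRule
      public static HiveTestEnvSetup env_setup = new HiveTestEnvSetup();

      @Rule
      public TestRule methodRule = env_setup.getMethodRule();

      @Test
      public void testSimpleQuery() throws Exception {
        // the rule supplies a fully wired HiveConf (tmp dirs, ZooKeeper, Tez)
        HiveConf conf = env_setup.getTestCtx().hiveConf;
        SessionState.start(conf);
        IDriver driver = DriverFactory.newDriver(conf);
        assertEquals(0, driver.run("select 1").getResponseCode());
      }
    }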


http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 12a7c03..13067df 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -573,6 +573,8 @@ public class HiveConf extends Configuration {
         "If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
 
     HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
+    HIVE_IN_TEST_IDE("hive.in.ide.test", false, "internal usage only, true if test running in ide",
+        true),
     HIVE_TESTING_SHORT_LOGS("hive.testing.short.logs", false,
         "internal usage only, used only in test mode. If set true, when requesting the " +
         "operation logs the short version (generated by LogDivertAppenderForTest) will be " +

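The new flag is consulted later in this patch through a plain Configuration
lookup (DagUtils only has a Configuration at hand there); a sketch of that
lookup as it appears in getExecJarPathLocal() below:

    boolean inIde = configuration.getBoolean(ConfVars.HIVE_IN_TEST_IDE.varname, false);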
http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
index cbba779..d38810f 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCliConfig.java
@@ -23,11 +23,9 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.net.URL;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 import java.util.TreeSet;
@@ -35,15 +33,15 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.FsType;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
+import org.apache.hive.testutils.HiveTestEnvSetup;
+
 import com.google.common.base.Splitter;
-import com.google.common.collect.Sets;
 
 public abstract class AbstractCliConfig {
 
-  public static final String HIVE_ROOT = getHiveRoot();
+  public static final String HIVE_ROOT = HiveTestEnvSetup.HIVE_ROOT;
 
   public static enum MetastoreType {
     sql
@@ -80,31 +78,6 @@ public abstract class AbstractCliConfig {
     runDisabled = getSysPropValue("run_disabled");
   }
 
-  private static String getHiveRoot() {
-    List<String> candidateSiblings = new ArrayList<>();
-    if (System.getProperty("hive.root") != null) {
-      try {
-        candidateSiblings.add(new File(System.getProperty("hive.root")).getCanonicalPath());
-      } catch (IOException e) {
-        throw new RuntimeException("error getting hive.root",e);
-      }
-    }
-    candidateSiblings.add(new File(".").getAbsolutePath());
-
-    for (String string : candidateSiblings) {
-      File curr = new File(string);
-      do {
-        Set<String> lls = Sets.newHashSet(curr.list());
-        if (lls.contains("itests") && lls.contains("ql") && lls.contains("metastore")) {
-          System.out.println("detected hiveRoot: " + curr);
-          return QTestUtil.ensurePathEndsInSlash(curr.getAbsolutePath());
-        }
-        curr = curr.getParentFile();
-      } while (curr != null);
-    }
-    throw new RuntimeException("unable to find hiveRoot");
-  }
-
   protected void setQueryDir(String dir) {
     queryDirectory = getAbsolutePath(dir);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
index 07468b8..eda6b69 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hive.conf.VariableSubstitution;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
+import org.apache.hive.testutils.HiveTestEnvSetup;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -171,8 +172,8 @@ public abstract class AbstractCoreBlobstoreCliDriver extends CliAdapter {
       }
     }).substitute(new HiveConf(), qt.getConf().get(HCONF_TEST_BLOBSTORE_PATH));
 
-    testBlobstorePath = QTestUtil.ensurePathEndsInSlash(testBlobstorePath);
-    testBlobstorePath += QTestUtil.ensurePathEndsInSlash(this.getClass().getSimpleName()); // name of child class
+    testBlobstorePath = HiveTestEnvSetup.ensurePathEndsInSlash(testBlobstorePath);
+    testBlobstorePath += HiveTestEnvSetup.ensurePathEndsInSlash(this.getClass().getSimpleName()); // name of child class
     String uid = new SimpleDateFormat("yyyyMMdd.HHmmss.SSS").format(Calendar.getInstance().getTime())
         + "-" + String.format("%03d", (int)(Math.random() * 999));
     testBlobstorePathUnique = testBlobstorePath + uid;

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 0b0b7ee..4432aca 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -95,6 +95,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hive.druid.MiniDruidCluster;
+import org.apache.hive.testutils.HiveTestEnvSetup;
 import org.apache.hadoop.hive.llap.LlapItUtils;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
@@ -2174,17 +2175,6 @@ public class QTestUtil {
     System.err.flush();
   }
 
-  public static String ensurePathEndsInSlash(String path) {
-    if(path == null) {
-      throw new NullPointerException("Path cannot be null");
-    }
-    if(path.endsWith(File.separator)) {
-      return path;
-    } else {
-      return path + File.separator;
-    }
-  }
-
   private static String[] cachedQvFileList = null;
   private static ImmutableList<String> cachedDefaultQvFileList = null;
   private static Pattern qvSuffix = Pattern.compile("_[0-9]+.qv$", Pattern.CASE_INSENSITIVE);

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 1db045f..9885038 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -24,7 +24,10 @@ import com.google.common.base.Function;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import javax.security.auth.login.LoginException;
+
+import java.io.File;
 import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -39,6 +42,8 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.zip.ZipOutputStream;
+
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.tez.mapreduce.common.MRInputSplitDistributor;
@@ -949,7 +954,9 @@ public class DagUtils {
   }
 
   public static String[] getTempFilesFromConf(Configuration conf) {
-    if (conf == null) return new String[0]; // In tests.
+    if (conf == null) {
+      return new String[0]; // In tests.
+    }
     String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE);
     if (StringUtils.isNotBlank(addedFiles)) {
       HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles);
@@ -986,7 +993,9 @@ public class DagUtils {
    */
   public List<LocalResource> localizeTempFiles(String hdfsDirPathStr, Configuration conf,
       String[] inputOutputJars, String[] skipJars) throws IOException, LoginException {
-    if (inputOutputJars == null) return null;
+    if (inputOutputJars == null) {
+      return null;
+    }
     List<LocalResource> tmpResources = new ArrayList<LocalResource>();
     addTempResources(conf, tmpResources, hdfsDirPathStr,
         LocalResourceType.FILE, inputOutputJars, skipJars);
@@ -1001,7 +1010,9 @@ public class DagUtils {
     if (skipFiles != null) {
       skipFileSet = new HashSet<>();
       for (String skipFile : skipFiles) {
-        if (StringUtils.isBlank(skipFile)) continue;
+        if (StringUtils.isBlank(skipFile)) {
+          continue;
+        }
         skipFileSet.add(new Path(skipFile));
       }
     }
@@ -1053,9 +1064,32 @@ public class DagUtils {
   }
 
   // the api that finds the jar being used by this class on disk
-  public String getExecJarPathLocal () throws URISyntaxException {
+  public String getExecJarPathLocal(Configuration configuration) throws URISyntaxException {
     // returns the location on disc of the jar of this class.
-    return DagUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI().toString();
+
+    URI uri = DagUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI();
+    if (configuration.getBoolean(ConfVars.HIVE_IN_TEST_IDE.varname, false)) {
+      if (new File(uri.getPath()).isDirectory()) {
+        // IDE support for running tez jobs
+        uri = createEmptyArchive();
+      }
+    }
+    return uri.toString();
+
+  }
+
+  /**
+   * Testing related; creates an empty archive to be localized in place of hive-exec
+   */
+  private URI createEmptyArchive() {
+    try {
+      File outputJar = new File(System.getProperty("build.test.dir"), "empty.jar");
+      ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(outputJar));
+      zos.close();
+      return outputJar.toURI();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
   }
 
   /*
@@ -1156,7 +1190,9 @@ public class DagUtils {
   public boolean checkOrWaitForTheFile(FileSystem srcFs, Path src, Path dest, Configuration conf,
       Object notifier, int waitAttempts, long sleepInterval, boolean doLog) throws IOException {
     for (int i = 0; i < waitAttempts; i++) {
-      if (checkPreExisting(srcFs, src, dest, conf)) return true;
+      if (checkPreExisting(srcFs, src, dest, conf)) {
+        return true;
+      }
       if (doLog && i == 0) {
         LOG.info("Waiting for the file " + dest + " (" + waitAttempts + " attempts, with "
             + sleepInterval + "ms interval)");
@@ -1520,7 +1556,9 @@ public class DagUtils {
   public static Map<String, LocalResource> getResourcesUpdatableForAm(
       Collection<LocalResource> allNonAppResources) {
     HashMap<String, LocalResource> allNonAppFileResources = new HashMap<>();
-    if (allNonAppResources == null) return allNonAppFileResources;
+    if (allNonAppResources == null) {
+      return allNonAppFileResources;
+    }
     for (LocalResource lr : allNonAppResources) {
       if (lr.getType() == LocalResourceType.FILE) {
         // TEZ AM will only localize FILE (no script operators in the AM)

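For context on createEmptyArchive(): when hive-exec runs from a packaged jar,
the protection-domain code source points at the jar file itself, but when a
test is launched from an IDE it points at a classes directory, which cannot be
handed to Tez as a localizable archive - hence the empty placeholder jar. An
illustrative (not committed) comparison; the paths are examples only:

    URI loc = DagUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI();
    // packaged run: file:/.../hive-exec.jar      -> new File(loc.getPath()).isDirectory() == false
    // IDE run:      file:/.../ql/target/classes/ -> new File(loc.getPath()).isDirectory() == true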
http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
index b56595e..39a9c77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
@@ -86,7 +86,7 @@ public class HivePreWarmProcessor extends AbstractLogicalIOProcessor {
     ReadaheadPool rpool = ReadaheadPool.getInstance();
     ShimLoader.getHadoopShims();
 
-    URL hiveurl = new URL("jar:"+DagUtils.getInstance().getExecJarPathLocal()+"!/");
+    URL hiveurl = new URL("jar:" + DagUtils.getInstance().getExecJarPathLocal(conf) + "!/");
     JarURLConnection hiveconn = (JarURLConnection)hiveurl.openConnection();
     JarFile hivejar = hiveconn.getJarFile();
     try {

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index a176504..b98fb58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -37,7 +37,6 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
-
 import javax.security.auth.login.LoginException;
 
 import org.apache.commons.codec.digest.DigestUtils;
@@ -86,7 +85,6 @@ import org.apache.tez.serviceplugins.api.ContainerLauncherDescriptor;
 import org.apache.tez.serviceplugins.api.ServicePluginsDescriptor;
 import org.apache.tez.serviceplugins.api.TaskCommunicatorDescriptor;
 import org.apache.tez.serviceplugins.api.TaskSchedulerDescriptor;
-import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 import org.slf4j.Logger;
@@ -162,6 +160,7 @@ public class TezSessionState {
     this.conf = conf;
   }
 
+  @Override
   public String toString() {
     return "sessionId=" + sessionId + ", queueName=" + queueName + ", user=" + user
         + ", doAs=" + doAsEnabled + ", isOpen=" + isOpen() + ", isDefault=" + defaultQueue;
@@ -177,7 +176,9 @@ public class TezSessionState {
   }
 
   public boolean isOpening() {
-    if (session != null || sessionFuture == null) return false;
+    if (session != null || sessionFuture == null) {
+      return false;
+    }
     try {
       session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
     } catch (InterruptedException e) {
@@ -194,8 +195,12 @@ public class TezSessionState {
   }
 
   public boolean isOpen() {
-    if (session != null) return true;
-    if (sessionFuture == null) return false;
+    if (session != null) {
+      return true;
+    }
+    if (sessionFuture == null) {
+      return false;
+    }
     try {
       session = sessionFuture.get(0, TimeUnit.NANOSECONDS);
     } catch (InterruptedException e) {
@@ -277,7 +282,7 @@ public class TezSessionState {
 
     // unless already installed on all the cluster nodes, we'll have to
     // localize hive-exec.jar as well.
-    appJarLr = createJarLocalResource(utils.getExecJarPathLocal());
+    appJarLr = createJarLocalResource(utils.getExecJarPathLocal(conf));
 
     // configuration for the application master
     final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
@@ -428,8 +433,10 @@ public class TezSessionState {
       try {
         session.waitTillReady();
       } catch (InterruptedException ie) {
-        if (isOnThread) throw new IOException(ie);
-        //ignore
+        if (isOnThread) {
+          throw new IOException(ie);
+        }
+        // otherwise: ignore
       }
       isSuccessful = true;
       // sessionState.getQueueName() comes from cluster wide configured queue names.
@@ -460,7 +467,9 @@ public class TezSessionState {
   }
 
   public void endOpen() throws InterruptedException, CancellationException {
-    if (this.session != null || this.sessionFuture == null) return;
+    if (this.session != null || this.sessionFuture == null) {
+      return;
+    }
     try {
       this.session = this.sessionFuture.get();
     } catch (ExecutionException e) {
@@ -587,7 +596,9 @@ public class TezSessionState {
       if (hasResources) {
         for (String s : newFilesNotFromConf) {
           hasResources = resources.additionalFilesNotFromConf.contains(s);
-          if (!hasResources) break;
+          if (!hasResources) {
+            break;
+          }
         }
       }
       if (!hasResources) {
@@ -857,7 +868,9 @@ public class TezSessionState {
 
   /** Mark session as free for use from TezTask, for safety/debugging purposes. */
   public void markFree() {
-    if (ownerThread.getAndSet(null) == null) throw new AssertionError("Not in use");
+    if (ownerThread.getAndSet(null) == null) {
+      throw new AssertionError("Not in use");
+    }
   }
 
   /** Mark session as being in use from TezTask, for safety/debugging purposes. */

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index 47ff366..d56002d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -345,7 +345,7 @@ public class GenericUDTFGetSplits extends GenericUDTF {
     Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), job);
     FileSystem fs = scratchDir.getFileSystem(job);
     try {
-      LocalResource appJarLr = createJarLocalResource(utils.getExecJarPathLocal(), utils, job);
+      LocalResource appJarLr = createJarLocalResource(utils.getExecJarPathLocal(ctx.getConf()), utils, job);
 
       LlapCoordinator coordinator = LlapCoordinator.getInstance();
       if (coordinator == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/test/org/apache/hive/testutils/HiveTestEnvSetup.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hive/testutils/HiveTestEnvSetup.java b/ql/src/test/org/apache/hive/testutils/HiveTestEnvSetup.java
new file mode 100644
index 0000000..52b787f
--- /dev/null
+++ b/ql/src/test/org/apache/hive/testutils/HiveTestEnvSetup.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.testutils;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.IDriver;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
+import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.junit.rules.ExternalResource;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestRule;
+
+import com.google.common.collect.Sets;
+
+/**
+ * Helps in setting up environments to run high level hive tests.
+ *
+ * For a sample testcase see {@link TestHiveTestEnvSetup}!
+ *
+ * Because setting up such a complex environment is a bit more sophisticated than it should be,
+ * this class introduces some helper concepts beyond what junit4 has:
+ *
+ * <ul>
+ *  <li>parts are decomposed into smaller {@link IHiveTestRule}s</li>
+ *  <li>{@link HiveTestEnvContext} is visible to every method call</li>
+ *  <li>invocation order of before calls is "forward"</li>
+ *  <li>invocation order of after calls is "backward"</li>
+ * </ul>
+ *
+ * Later this should be migrated to junit5...when that becomes possible; see HIVE-18495.
+ */
+public class HiveTestEnvSetup extends ExternalResource {
+
+  static interface IHiveTestRule {
+    default void beforeClass(HiveTestEnvContext ctx) throws Exception {
+    }
+
+    default void afterClass(HiveTestEnvContext ctx) throws Exception {
+    }
+
+    default void beforeMethod(HiveTestEnvContext ctx) throws Exception {
+    }
+
+    default void afterMethod(HiveTestEnvContext ctx) throws Exception {
+    }
+  }
+
+  public static class HiveTestEnvContext {
+
+    public File tmpFolder;
+    public HiveConf hiveConf;
+
+  }
+
+  static class TmpDirSetup implements IHiveTestRule {
+
+    public TemporaryFolder tmpFolderRule = new TemporaryFolder(new File(HIVE_ROOT + "/target/tmp"));
+
+    @Override
+    public void beforeClass(HiveTestEnvContext ctx) throws Exception {
+      tmpFolderRule.create();
+      ctx.tmpFolder = tmpFolderRule.getRoot();
+    }
+
+    @Override
+    public void afterClass(HiveTestEnvContext ctx) {
+      tmpFolderRule.delete();
+      ctx.tmpFolder = null;
+    }
+  }
+
+
+  static class SetTestEnvs implements IHiveTestRule {
+    @Override
+    public void beforeClass(HiveTestEnvContext ctx) throws Exception {
+
+      File tmpFolder = ctx.tmpFolder;
+      String tmpFolderPath = tmpFolder.getAbsolutePath();
+
+      // these are mostly copied from the root pom.xml
+      System.setProperty("build.test.dir", tmpFolderPath);
+      System.setProperty("derby.stream.error.file", tmpFolderPath + "/derby.log");
+      System.setProperty("hadoop.bin.path", HIVE_ROOT + "/testutils/hadoop");
+      System.setProperty("hadoop.log.dir", tmpFolderPath);
+      System.setProperty("mapred.job.tracker", "local");
+      System.setProperty("log4j.configurationFile", "file://" + tmpFolderPath + "/conf/hive-log4j2.properties");
+      System.setProperty("log4j.debug", "true");
+      System.setProperty("java.io.tmpdir", tmpFolderPath);
+      System.setProperty("test.build.data", tmpFolderPath);
+      System.setProperty("test.data.files", DATA_DIR + "/files");
+      System.setProperty("test.data.dir", DATA_DIR + "/files");
+      System.setProperty("test.tmp.dir", tmpFolderPath);
+      System.setProperty("test.tmp.dir.uri", "file://" + tmpFolderPath);
+      System.setProperty("test.dfs.mkdir", "-mkdir -p");
+      System.setProperty("test.warehouse.dir", tmpFolderPath + "/warehouse"); // this is changed to be *under* tmp dir
+      System.setProperty("java.net.preferIPv4Stack", "true"); // not sure if this will have any effect..
+      System.setProperty("test.src.tables", "src");
+      System.setProperty("hive.jar.directory", tmpFolderPath);
+    }
+
+  }
+
+  static class SetupHiveConf implements IHiveTestRule {
+
+    private HiveConf savedConf;
+
+    @Override
+    public void beforeClass(HiveTestEnvContext ctx) throws Exception {
+
+      File confFolder = new File(ctx.tmpFolder, "conf");
+
+      FileUtils.copyDirectory(new File(DATA_DIR + "/conf/"), confFolder);
+      FileUtils.copyDirectory(new File(DATA_DIR + "/conf/tez"), confFolder);
+
+      HiveConf.setHiveSiteLocation(new File(confFolder, "hive-site.xml").toURI().toURL());
+      HiveConf.setHivemetastoreSiteUrl(new File(confFolder, "hivemetastore-site.xml").toURI().toURL());
+      // FIXME: hiveServer2SiteUrl is not settable?
+
+      ctx.hiveConf = new HiveConf(IDriver.class);
+      ctx.hiveConf.setBoolVar(ConfVars.HIVE_IN_TEST_IDE, true);
+    }
+
+    @Override
+    public void beforeMethod(HiveTestEnvContext ctx) throws Exception {
+      if (savedConf == null) {
+        savedConf = new HiveConf(ctx.hiveConf);
+      }
+      // serve a fresh conf for every test method
+      ctx.hiveConf = new HiveConf(savedConf);
+    }
+
+    @Override
+    public void afterClass(HiveTestEnvContext ctx) throws Exception {
+      savedConf = null;
+      ctx.hiveConf = null;
+    }
+  }
+
+  static class SetupZookeeper implements IHiveTestRule {
+
+    private ZooKeeper zooKeeper;
+    private MiniZooKeeperCluster zooKeeperCluster;
+    private int zkPort;
+
+    @Override
+    public void beforeClass(HiveTestEnvContext ctx) throws Exception {
+      File tmpDir = new File(ctx.tmpFolder, "zookeeper");
+      zooKeeperCluster = new MiniZooKeeperCluster();
+      zkPort = zooKeeperCluster.startup(tmpDir);
+    }
+
+    @Override
+    public void beforeMethod(HiveTestEnvContext ctx) throws Exception {
+      int sessionTimeout = (int) ctx.hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
+      zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
+        @Override
+        public void process(WatchedEvent arg0) {
+        }
+      });
+
+      String zkServer = "localhost";
+      ctx.hiveConf.set("hive.zookeeper.quorum", zkServer);
+      ctx.hiveConf.set("hive.zookeeper.client.port", "" + zkPort);
+    }
+
+    @Override
+    public void afterMethod(HiveTestEnvContext ctx) throws Exception {
+      zooKeeper.close();
+      ZooKeeperHiveLockManager.releaseAllLocks(ctx.hiveConf);
+    }
+
+    @Override
+    public void afterClass(HiveTestEnvContext ctx) throws Exception {
+      CuratorFrameworkSingleton.closeAndReleaseInstance();
+
+      if (zooKeeperCluster != null) {
+        zooKeeperCluster.shutdown();
+        zooKeeperCluster = null;
+      }
+    }
+
+  }
+
+  static class SetupTez implements IHiveTestRule {
+    private MiniMrShim mr1;
+
+    @Override
+    public void beforeClass(HiveTestEnvContext ctx) throws Exception {
+      HadoopShims shims = ShimLoader.getHadoopShims();
+      mr1 = shims.getLocalMiniTezCluster(ctx.hiveConf, true);
+      mr1.setupConfiguration(ctx.hiveConf);
+    }
+
+    @Override
+    public void afterClass(HiveTestEnvContext ctx) throws Exception {
+      mr1.shutdown();
+    }
+  }
+
+  public static final String HIVE_ROOT = getHiveRoot();
+  public static final String DATA_DIR = HIVE_ROOT + "/data/";
+  List<IHiveTestRule> parts = new ArrayList<>();
+
+  public HiveTestEnvSetup() {
+    parts.add(new TmpDirSetup());
+    parts.add(new SetTestEnvs());
+    parts.add(new SetupHiveConf());
+    parts.add(new SetupZookeeper());
+    parts.add(new SetupTez());
+  }
+
+  TemporaryFolder tmpFolderRule = new TemporaryFolder(new File(HIVE_ROOT + "/target/tmp"));
+  private HiveTestEnvContext testEnvContext = new HiveTestEnvContext();
+
+  @Override
+  protected void before() throws Throwable {
+    for (IHiveTestRule p : parts) {
+      p.beforeClass(testEnvContext);
+    }
+  }
+
+  @Override
+  protected void after() {
+    try {
+      for (IHiveTestRule p : Lists.reverse(parts)) {
+        p.afterClass(testEnvContext);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("test-subsystem error", e);
+    }
+  }
+
+  class MethodRuleProxy extends ExternalResource {
+
+    @Override
+    protected void before() throws Throwable {
+      for (IHiveTestRule p : parts) {
+        p.beforeMethod(testEnvContext);
+      }
+    }
+
+    @Override
+    protected void after() {
+      try {
+        for (IHiveTestRule p : Lists.reverse(parts)) {
+          p.afterMethod(testEnvContext);
+        }
+      } catch (Exception e) {
+        throw new RuntimeException("test-subsystem error", e);
+      }
+    }
+  }
+
+  private static String getHiveRoot() {
+    List<String> candidateSiblings = new ArrayList<>();
+    if (System.getProperty("hive.root") != null) {
+      try {
+        candidateSiblings.add(new File(System.getProperty("hive.root")).getCanonicalPath());
+      } catch (IOException e) {
+        throw new RuntimeException("error getting hive.root", e);
+      }
+    }
+    candidateSiblings.add(new File(".").getAbsolutePath());
+
+    for (String string : candidateSiblings) {
+      File curr = new File(string);
+      do {
+        Set<String> lls = Sets.newHashSet(curr.list());
+        if (lls.contains("itests") && lls.contains("ql") && lls.contains("metastore")) {
+          System.out.println("detected hiveRoot: " + curr);
+          return ensurePathEndsInSlash(curr.getAbsolutePath());
+        }
+        curr = curr.getParentFile();
+      } while (curr != null);
+    }
+    throw new RuntimeException("unable to find hiveRoot");
+  }
+
+  public static String ensurePathEndsInSlash(String path) {
+    if (path == null) {
+      throw new NullPointerException("Path cannot be null");
+    }
+    if (path.endsWith(File.separator)) {
+      return path;
+    } else {
+      return path + File.separator;
+    }
+  }
+
+  public File getDir(String string) {
+    try {
+      return tmpFolderRule.newFolder(string);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public TestRule getMethodRule() {
+    return new MethodRuleProxy();
+  }
+
+  public HiveTestEnvContext getTestCtx() {
+    return testEnvContext;
+  }
+
+}

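The parts list above is fixed in the constructor, but IHiveTestRule also admits
custom parts hooked into the same forward/backward lifecycle. A hypothetical
example part (the scratch-dir override is illustrative only, not part of this
commit):

    static class SetScratchDir implements HiveTestEnvSetup.IHiveTestRule {
      @Override
      public void beforeClass(HiveTestEnvSetup.HiveTestEnvContext ctx) throws Exception {
        // beforeClass calls run in "forward" order over the parts list
        ctx.hiveConf.set("hive.exec.scratchdir", ctx.tmpFolder.getAbsolutePath() + "/scratch");
      }
    }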
http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/test/org/apache/hive/testutils/MiniZooKeeperCluster.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hive/testutils/MiniZooKeeperCluster.java b/ql/src/test/org/apache/hive/testutils/MiniZooKeeperCluster.java
new file mode 100644
index 0000000..1067dcf
--- /dev/null
+++ b/ql/src/test/org/apache/hive/testutils/MiniZooKeeperCluster.java
@@ -0,0 +1,456 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.testutils;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InterruptedIOException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.net.BindException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.zookeeper.server.NIOServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnLog;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+
+/**
+ * TODO: Most of the code in this class is ripped from ZooKeeper tests. Instead
+ * of redoing it, we should contribute updates to their code which let us more
+ * easily access testing helper objects.
+ *
+ * XXX: copied from hbase-tests; this was the only class from there used by QTestUtil
+ */
+class MiniZooKeeperCluster {
+  private static final Log LOG = LogFactory.getLog(MiniZooKeeperCluster.class);
+
+  private static final int TICK_TIME = 2000;
+  private static final int DEFAULT_CONNECTION_TIMEOUT = 30000;
+  private int connectionTimeout;
+
+  private boolean started;
+
+  /** The default port. If zero, we use a random port. */
+  private int defaultClientPort = 0;
+
+  private List<NIOServerCnxnFactory> standaloneServerFactoryList;
+  private List<ZooKeeperServer> zooKeeperServers;
+  private List<Integer> clientPortList;
+
+  private int activeZKServerIndex;
+  private int tickTime = 0;
+
+  private Configuration configuration;
+
+  public MiniZooKeeperCluster() {
+    this(new Configuration());
+  }
+
+  public MiniZooKeeperCluster(Configuration configuration) {
+    this.started = false;
+    this.configuration = configuration;
+    activeZKServerIndex = -1;
+    zooKeeperServers = new ArrayList<>();
+    clientPortList = new ArrayList<>();
+    standaloneServerFactoryList = new ArrayList<>();
+    connectionTimeout = configuration.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", DEFAULT_CONNECTION_TIMEOUT);
+  }
+
+  /**
+   * Add a client port to the list.
+   *
+   * @param clientPort the specified port
+   */
+  public void addClientPort(int clientPort) {
+    clientPortList.add(clientPort);
+  }
+
+  /**
+   * Get the list of client ports.
+   * @return clientPortList the client port list
+   */
+  @VisibleForTesting
+  public List<Integer> getClientPortList() {
+    return clientPortList;
+  }
+
+  /**
+   * Check whether the client port in a specific position of the client port list is valid.
+   *
+   * @param index the specified position
+   */
+  private boolean hasValidClientPortInList(int index) {
+    return (clientPortList.size() > index && clientPortList.get(index) > 0);
+  }
+
+  public void setDefaultClientPort(int clientPort) {
+    if (clientPort <= 0) {
+      throw new IllegalArgumentException("Invalid default ZK client port: " + clientPort);
+    }
+    this.defaultClientPort = clientPort;
+  }
+
+  /**
+   * Selects a ZK client port.
+   *
+   * @param seedPort the seed port to start with; -1 means first time.
+   * @return a valid and unused client port
+   */
+  private int selectClientPort(int seedPort) {
+    int i;
+    int returnClientPort = seedPort + 1;
+    if (returnClientPort == 0) {
+      // If the new port is invalid, find one - starting with the default client port.
+      // If the default client port is not specified, start with a random port.
+      // The random port is selected from the range 49152 to 65535. These ports cannot be
+      // registered with IANA and are intended for dynamic allocation (see http://bit.ly/dynports).
+      if (defaultClientPort > 0) {
+        returnClientPort = defaultClientPort;
+      } else {
+        returnClientPort = 0xc000 + new Random().nextInt(0x3f00);
+      }
+    }
+    // Make sure that the port is unused.
+    while (true) {
+      for (i = 0; i < clientPortList.size(); i++) {
+        if (returnClientPort == clientPortList.get(i)) {
+          // Already used. Update the port and retry.
+          returnClientPort++;
+          break;
+        }
+      }
+      if (i == clientPortList.size()) {
+        break; // found an unused port, exit
+      }
+    }
+    return returnClientPort;
+  }
+
+  public void setTickTime(int tickTime) {
+    this.tickTime = tickTime;
+  }
+
+  public int getBackupZooKeeperServerNum() {
+    return zooKeeperServers.size() - 1;
+  }
+
+  public int getZooKeeperServerNum() {
+    return zooKeeperServers.size();
+  }
+
+  // / XXX: From o.a.zk.t.ClientBase
+  private static void setupTestEnv() {
+    // during the tests we run with 100K prealloc in the logs.
+    // on windows systems prealloc of 64M was seen to take ~15seconds
+    // resulting in test failure (client timeout on first session).
+    // set both the system property and the static field directly in order to handle static init/gc issues
+    System.setProperty("zookeeper.preAllocSize", "100");
+    FileTxnLog.setPreallocSize(100 * 1024);
+  }
+
+  public int startup(File baseDir) throws IOException, InterruptedException {
+    int numZooKeeperServers = clientPortList.size();
+    if (numZooKeeperServers == 0) {
+      numZooKeeperServers = 1; // need at least 1 ZK server for testing
+    }
+    return startup(baseDir, numZooKeeperServers);
+  }
+
+  /**
+   * @param baseDir
+   * @param numZooKeeperServers
+   * @return ClientPort server bound to, -1 if there was a
+   *         binding problem and we couldn't pick another port.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public int startup(File baseDir, int numZooKeeperServers) throws IOException, InterruptedException {
+    if (numZooKeeperServers <= 0) {
+      return -1;
+    }
+
+    setupTestEnv();
+    shutdown();
+
+    int tentativePort = -1; // the seed port
+    int currentClientPort;
+
+    // running all the ZK servers
+    for (int i = 0; i < numZooKeeperServers; i++) {
+      File dir = new File(baseDir, "zookeeper_" + i).getAbsoluteFile();
+      createDir(dir);
+      int tickTimeToUse;
+      if (this.tickTime > 0) {
+        tickTimeToUse = this.tickTime;
+      } else {
+        tickTimeToUse = TICK_TIME;
+      }
+
+      // Set up client port - if we already have a list of valid ports, use it.
+      if (hasValidClientPortInList(i)) {
+        currentClientPort = clientPortList.get(i);
+      } else {
+        tentativePort = selectClientPort(tentativePort); // update the seed
+        currentClientPort = tentativePort;
+      }
+
+      ZooKeeperServer server = new ZooKeeperServer(dir, dir, tickTimeToUse);
+      // Setting {min,max}SessionTimeout defaults to be the same as in Zookeeper
+      server.setMinSessionTimeout(configuration.getInt("hbase.zookeeper.property.minSessionTimeout", -1));
+      server.setMaxSessionTimeout(configuration.getInt("hbase.zookeeper.property.maxSessionTimeout", -1));
+      NIOServerCnxnFactory standaloneServerFactory;
+      while (true) {
+        try {
+          standaloneServerFactory = new NIOServerCnxnFactory();
+          standaloneServerFactory.configure(new InetSocketAddress(currentClientPort),
+              configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS));
+        } catch (BindException e) {
+          LOG.debug("Failed binding ZK Server to client port: " + currentClientPort, e);
+          // We're told to use some port but it's occupied, fail
+          if (hasValidClientPortInList(i)) {
+            return -1;
+          }
+          // This port is already in use, try to use another.
+          tentativePort = selectClientPort(tentativePort);
+          currentClientPort = tentativePort;
+          continue;
+        }
+        break;
+      }
+
+      // Start up this ZK server
+      standaloneServerFactory.startup(server);
+      // Runs a 'stat' against the servers.
+      if (!waitForServerUp(currentClientPort, connectionTimeout)) {
+        throw new IOException("Waiting for startup of standalone server");
+      }
+
+      // We have selected a port as a client port.  Update clientPortList if necessary.
+      if (clientPortList.size() <= i) { // it is not in the list, add the port
+        clientPortList.add(currentClientPort);
+      } else if (clientPortList.get(i) <= 0) { // the list has an invalid port, update with the valid port
+        clientPortList.remove(i);
+        clientPortList.add(i, currentClientPort);
+      }
+
+      standaloneServerFactoryList.add(standaloneServerFactory);
+      zooKeeperServers.add(server);
+    }
+
+    // set the first one to be active ZK; Others are backups
+    activeZKServerIndex = 0;
+    started = true;
+    int clientPort = clientPortList.get(activeZKServerIndex);
+    LOG.info("Started MiniZooKeeperCluster and ran successful 'stat' on client port=" + clientPort);
+    return clientPort;
+  }
+
+  private void createDir(File dir) throws IOException {
+    try {
+      if (!dir.exists()) {
+        dir.mkdirs();
+      }
+    } catch (SecurityException e) {
+      throw new IOException("creating dir: " + dir, e);
+    }
+  }
+
+  /**
+   * @throws IOException
+   */
+  public void shutdown() throws IOException {
+    // shut down all the zk servers
+    for (int i = 0; i < standaloneServerFactoryList.size(); i++) {
+      NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(i);
+      int clientPort = clientPortList.get(i);
+
+      standaloneServerFactory.shutdown();
+      if (!waitForServerDown(clientPort, connectionTimeout)) {
+        throw new IOException("Waiting for shutdown of standalone server");
+      }
+    }
+    standaloneServerFactoryList.clear();
+
+    for (ZooKeeperServer zkServer : zooKeeperServers) {
+      //explicitly close ZKDatabase since ZookeeperServer does not close them
+      zkServer.getZKDatabase().close();
+    }
+    zooKeeperServers.clear();
+
+    // clear everything
+    if (started) {
+      started = false;
+      activeZKServerIndex = 0;
+      clientPortList.clear();
+      LOG.info("Shutdown MiniZK cluster with all ZK servers");
+    }
+  }
+
+  /**
+   * @return the client port of the backup ZK server that takes over when the current
+   *         active one is killed; -1 if there are no backups.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedException {
+    if (!started || activeZKServerIndex < 0) {
+      return -1;
+    }
+
+    // Shutdown the current active one
+    NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(activeZKServerIndex);
+    int clientPort = clientPortList.get(activeZKServerIndex);
+
+    standaloneServerFactory.shutdown();
+    if (!waitForServerDown(clientPort, connectionTimeout)) {
+      throw new IOException("Waiting for shutdown of standalone server");
+    }
+
+    zooKeeperServers.get(activeZKServerIndex).getZKDatabase().close();
+
+    // remove the current active zk server
+    standaloneServerFactoryList.remove(activeZKServerIndex);
+    clientPortList.remove(activeZKServerIndex);
+    zooKeeperServers.remove(activeZKServerIndex);
+    LOG.info("Killed the current active ZK server in the cluster on client port: " + clientPort);
+
+    if (standaloneServerFactoryList.isEmpty()) {
+      // there is no backup servers;
+      return -1;
+    }
+    clientPort = clientPortList.get(activeZKServerIndex);
+    LOG.info("Activated a backup ZK server in the cluster on client port: " + clientPort);
+    // return the next back zk server's port
+    return clientPort;
+  }
+
+  /**
+   * Kill one backup ZK server.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void killOneBackupZooKeeperServer() throws IOException, InterruptedException {
+    if (!started || activeZKServerIndex < 0 || standaloneServerFactoryList.size() <= 1) {
+      return;
+    }
+
+    int backupZKServerIndex = activeZKServerIndex + 1;
+    // Shutdown the current active one
+    NIOServerCnxnFactory standaloneServerFactory = standaloneServerFactoryList.get(backupZKServerIndex);
+    int clientPort = clientPortList.get(backupZKServerIndex);
+
+    standaloneServerFactory.shutdown();
+    if (!waitForServerDown(clientPort, connectionTimeout)) {
+      throw new IOException("Waiting for shutdown of standalone server");
+    }
+
+    zooKeeperServers.get(backupZKServerIndex).getZKDatabase().close();
+
+    // remove this backup zk server
+    standaloneServerFactoryList.remove(backupZKServerIndex);
+    clientPortList.remove(backupZKServerIndex);
+    zooKeeperServers.remove(backupZKServerIndex);
+    LOG.info("Killed one backup ZK server in the cluster on client port: " + clientPort);
+  }
+
+  // XXX: From o.a.zk.t.ClientBase
+  private static boolean waitForServerDown(int port, long timeout) throws IOException {
+    long start = System.currentTimeMillis();
+    while (true) {
+      try {
+        Socket sock = new Socket("localhost", port);
+        try {
+          OutputStream outstream = sock.getOutputStream();
+          outstream.write("stat".getBytes());
+          outstream.flush();
+        } finally {
+          sock.close();
+        }
+      } catch (IOException e) {
+        return true;
+      }
+
+      if (System.currentTimeMillis() > start + timeout) {
+        break;
+      }
+      try {
+        Thread.sleep(250);
+      } catch (InterruptedException e) {
+        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+      }
+    }
+    return false;
+  }
+
+  // XXX: From o.a.zk.t.ClientBase
+  private static boolean waitForServerUp(int port, long timeout) throws IOException {
+    long start = System.currentTimeMillis();
+    while (true) {
+      try {
+        Socket sock = new Socket("localhost", port);
+        BufferedReader reader = null;
+        try {
+          OutputStream outstream = sock.getOutputStream();
+          outstream.write("stat".getBytes());
+          outstream.flush();
+
+          Reader isr = new InputStreamReader(sock.getInputStream());
+          reader = new BufferedReader(isr);
+          String line = reader.readLine();
+          if (line != null && line.startsWith("Zookeeper version:")) {
+            return true;
+          }
+        } finally {
+          sock.close();
+          if (reader != null) {
+            reader.close();
+          }
+        }
+      } catch (IOException e) {
+        // ignore as this is expected
+        LOG.info("server localhost:" + port + " not up " + e);
+      }
+
+      if (System.currentTimeMillis() > start + timeout) {
+        break;
+      }
+      try {
+        Thread.sleep(250);
+      } catch (InterruptedException e) {
+        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+      }
+    }
+    return false;
+  }
+
+  public int getClientPort() {
+    return activeZKServerIndex < 0 || activeZKServerIndex >= clientPortList.size() ? -1 : clientPortList.get(activeZKServerIndex);
+  }
+}

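The SetupZookeeper part above is the in-tree user of this copied class; a
standalone sketch of the same pattern (the base directory is a placeholder,
and since the class is package-private this only compiles inside
org.apache.hive.testutils):

    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster();
    int clientPort = zkCluster.startup(new File("/tmp/zk-base")); // starts one server, returns its port
    ZooKeeper zk = new ZooKeeper("localhost:" + clientPort, 30000, event -> { });
    // ... exercise ZooKeeper-backed code ...
    zk.close();
    zkCluster.shutdown();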
http://git-wip-us.apache.org/repos/asf/hive/blob/20ce1c61/ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java b/ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java
new file mode 100644
index 0000000..9161366
--- /dev/null
+++ b/ql/src/test/org/apache/hive/testutils/TestHiveTestEnvSetup.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.testutils;
+
+import static org.junit.Assert.assertEquals;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.DriverFactory;
+import org.apache.hadoop.hive.ql.IDriver;
+import org.apache.hadoop.hive.ql.parse.ParseException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.testutils.HiveTestEnvSetup;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+
+public class TestHiveTestEnvSetup {
+
+  @ClassRule
+  public static HiveTestEnvSetup env_setup = new HiveTestEnvSetup();
+
+  @Rule
+  public TestRule methodRule = env_setup.getMethodRule();
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    IDriver driver = createDriver();
+    dropTables(driver);
+    String[] cmds = {
+        // @formatter:off
+        "create table tu(u int)",
+        "insert into tu values (1),(2),(3)",
+        // @formatter:on
+    };
+    for (String cmd : cmds) {
+      int ret = driver.run(cmd).getResponseCode();
+      assertEquals("Checking command success", 0, ret);
+    }
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    IDriver driver = createDriver();
+    dropTables(driver);
+  }
+
+  public static void dropTables(IDriver driver) throws Exception {
+    String[] tables = { "s", "tu", "tv", "tw" };
+    for (String t : tables) {
+      int ret = driver.run("drop table if exists " + t).getResponseCode();
+      assertEquals("Checking command success", 0, ret);
+    }
+  }
+
+  @Test
+  public void testMappingSameQuery() throws ParseException, Exception {
+    IDriver driver = createDriver();
+    String query = "select sum(u*u),sum(u) from tu where u>1";
+    CommandProcessorResponse ret = driver.run(query);
+    assertEquals(0, ret.getResponseCode());
+
+    List<String> res = new ArrayList<>();
+    driver.getFetchTask().fetch(res);
+    System.out.println(res);
+    assertEquals(1, res.size());
+    assertEquals("13\t5", res.get(0));
+  }
+
+
+  private static IDriver createDriver() {
+    HiveConf conf = env_setup.getTestCtx().hiveConf;
+    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
+        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    SessionState.start(conf);
+
+    IDriver driver = DriverFactory.newDriver(conf);
+    return driver;
+  }
+
+}

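One property of the rule worth noting in this test: SetupHiveConf snapshots the
HiveConf once and hands every test method a fresh copy, so settings made inside
one test method are discarded before the next one runs. A hypothetical extra
test making that visible (the key is made up):

    @Test
    public void testConfIsScopedToMethod() {
      HiveConf conf = env_setup.getTestCtx().hiveConf;
      // discarded before the next method: SetupHiveConf.beforeMethod re-creates
      // ctx.hiveConf from the saved pristine copy
      conf.set("some.throwaway.key", "x");
    }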