hive git commit: HIVE-13771 : LLAPIF: generate app ID (Sergey Shelukhin, reviewed by Jason Dere)

2016-06-15 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master bad9eb666 -> 4c57ed35f


HIVE-13771 : LLAPIF: generate app ID (Sergey Shelukhin, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4c57ed35
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4c57ed35
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4c57ed35

Branch: refs/heads/master
Commit: 4c57ed35f1151269a1adbbf64db300ae2a1fa915
Parents: bad9eb6
Author: Sergey Shelukhin 
Authored: Tue Jun 14 19:26:31 2016 -0700
Committer: Sergey Shelukhin 
Committed: Wed Jun 15 14:11:49 2016 -0700

--
 .../hadoop/hive/llap/coordinator/LlapCoordinator.java | 13 +
 .../hive/ql/udf/generic/GenericUDTFGetSplits.java | 14 +++---
 2 files changed, 16 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4c57ed35/llap-client/src/java/org/apache/hadoop/hive/llap/coordinator/LlapCoordinator.java
--
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/coordinator/LlapCoordinator.java b/llap-client/src/java/org/apache/hadoop/hive/llap/coordinator/LlapCoordinator.java
index f55779b..ebddfc8 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/coordinator/LlapCoordinator.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/coordinator/LlapCoordinator.java
@@ -23,6 +23,7 @@ import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -76,6 +77,8 @@ public class LlapCoordinator {
 
   private HiveConf hiveConf;
   private String clusterUser;
+  private long startTime;
+  private final AtomicInteger appIdCounter = new AtomicInteger(0);
 
   LlapCoordinator() {
   }
@@ -85,6 +88,11 @@ public class LlapCoordinator {
 // HS2 init without the knowledge of LLAP usage (or lack thereof) in the cluster.
 this.hiveConf = hiveConf;
 this.clusterUser = UserGroupInformation.getCurrentUser().getShortUserName();
+// TODO: if two HS2s start at exactly the same time, which could happen during a coordinated
+//   restart, they could start generating the same IDs. Should we store the startTime
+//   somewhere like ZK? Try to randomize it a bit for now...
+long randomBits = (long)(new Random().nextInt()) << 32;
+this.startTime = Math.abs((System.currentTimeMillis() & (long)Integer.MAX_VALUE) | randomBits);
   }
 
   public LlapSigner getLlapSigner(final Configuration jobConf) {
@@ -105,14 +113,11 @@ public class LlapCoordinator {
   }
 
   public ApplicationId createExtClientAppId() {
-// TODO: moved from UDTF; need JIRA to generate this properly (no dups, etc.)...
-return ApplicationId.newInstance(Math.abs(new Random().nextInt()), 0);
 // Note that we cannot allow users to provide app ID, since providing somebody else's appId
 // would give one LLAP token (and splits) for that app ID. If we could verify it somehow
 // (YARN token? nothing we can do in an UDF), we could get it from client already running on
 // YARN. As such, the clients running on YARN will have two app IDs to be aware of.
-// TODO: Perhaps they can give us their app id as an argument to the UDF, and we'd just append
-// a unique string here, for easier tracking?
+return ApplicationId.newInstance(startTime, appIdCounter.incrementAndGet());
   }
 
   public LlapTokenLocalClient getLocalTokenClient(

http://git-wip-us.apache.org/repos/asf/hive/blob/4c57ed35/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index a2ad4f9..bdf254b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -334,7 +334,7 @@ public class GenericUDTFGetSplits extends GenericUDTF {
   }
 
   // See the discussion in the implementation as to why we generate app ID.
-  ApplicationId fakeApplicationId = coordinator.createExtClientAppId();
+  ApplicationId applicationId = coordinator.createExtClientAppId();
 
   // This assumes LLAP cluster owner is always the HS2 user.
   String llapUser = UserGroupInformation.getLoginUser().getShortUserName();
@@ -354,7 +354,7 @@ public class GenericUDTFGetSplits extends GenericUDTF {
 LlapTokenLocalClient tokenCli
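
The scheme above replaces a bare random int with a per-coordinator "cluster timestamp" (wall clock mixed with random high bits, so two HS2 instances started at the same moment are unlikely to collide) plus an atomic counter. A minimal standalone sketch of the same idea, using a hypothetical demo class rather than Hive's LlapCoordinator:

  import java.util.Random;
  import java.util.concurrent.atomic.AtomicInteger;

  public class ExtClientAppIdDemo {
    // Lower 31 bits of the wall clock, OR'd with random high bits, as in the patch.
    private final long startTime;
    private final AtomicInteger counter = new AtomicInteger(0);

    public ExtClientAppIdDemo() {
      long randomBits = (long) new Random().nextInt() << 32;
      this.startTime = Math.abs((System.currentTimeMillis() & (long) Integer.MAX_VALUE) | randomBits);
    }

    // Stands in for ApplicationId.newInstance(startTime, appIdCounter.incrementAndGet()):
    // IDs are unique within this process, and the random bits make cross-process
    // collisions unlikely (though, as the TODO above notes, not impossible).
    public String nextAppId() {
      return "application_" + startTime + "_" + counter.incrementAndGet();
    }
  }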

hive git commit: HIVE-14020: Hive MS restart failed during EU with ORA-00922 error as part of DB schema upgrade (Hari Subramaniyan, reviewed by Sushanth Sowmyan)

2016-06-15 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 45c1775e1 -> b399de13b


HIVE-14020: Hive MS restart failed during EU with ORA-00922 error as part of DB schema upgrade (Hari Subramaniyan, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b399de13
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b399de13
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b399de13

Branch: refs/heads/branch-2.1
Commit: b399de13becabbbe1253d7ebde6c2434cc47467c
Parents: 45c1775
Author: Hari Subramaniyan 
Authored: Wed Jun 15 11:30:31 2016 -0700
Committer: Hari Subramaniyan 
Committed: Wed Jun 15 11:31:43 2016 -0700

--
 metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b399de13/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
--
diff --git a/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
index a456e1e..52d25c1 100644
--- a/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
@@ -1,4 +1,4 @@
-CREATE TABLE IF NOT EXISTS  KEY_CONSTRAINTS
+CREATE TABLE KEY_CONSTRAINTS
 (
   CHILD_CD_ID NUMBER,
   CHILD_INTEGER_IDX NUMBER,



hive git commit: HIVE-14020: Hive MS restart failed during EU with ORA-00922 error as part of DB schema upgrade (Hari Subramaniyan, reviewed by Sushanth Sowmyan)

2016-06-15 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/master 9bf23f6c6 -> bad9eb666


HIVE-14020: Hive MS restart failed during EU with ORA-00922 error as part of DB schema upgrade (Hari Subramaniyan, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bad9eb66
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bad9eb66
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bad9eb66

Branch: refs/heads/master
Commit: bad9eb66677cf229337bb1e1f63b366e46508865
Parents: 9bf23f6
Author: Hari Subramaniyan 
Authored: Wed Jun 15 11:30:31 2016 -0700
Committer: Hari Subramaniyan 
Committed: Wed Jun 15 11:30:31 2016 -0700

--
 metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bad9eb66/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
--
diff --git a/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
index a456e1e..52d25c1 100644
--- a/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
@@ -1,4 +1,4 @@
-CREATE TABLE IF NOT EXISTS  KEY_CONSTRAINTS
+CREATE TABLE KEY_CONSTRAINTS
 (
   CHILD_CD_ID NUMBER,
   CHILD_INTEGER_IDX NUMBER,



hive git commit: HIVE-13956: LLAP: external client output is writing to channel before it is writable again (Jason Dere, reviewed by Prasanth Jayachandran)

2016-06-15 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master dc4c66f6b -> 9bf23f6c6


HIVE-13956: LLAP: external client output is writing to channel before it is writable again (Jason Dere, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9bf23f6c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9bf23f6c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9bf23f6c

Branch: refs/heads/master
Commit: 9bf23f6c684fab4308fcdc4c042dbef1618a0e06
Parents: dc4c66f
Author: Jason Dere 
Authored: Wed Jun 15 11:05:41 2016 -0700
Committer: Jason Dere 
Committed: Wed Jun 15 11:05:41 2016 -0700

--
 .../hadoop/hive/llap/ChannelOutputStream.java| 19 +--
 .../hive/llap/LlapOutputFormatService.java   | 11 ++-
 2 files changed, 27 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/9bf23f6c/ql/src/java/org/apache/hadoop/hive/llap/ChannelOutputStream.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/ChannelOutputStream.java b/ql/src/java/org/apache/hadoop/hive/llap/ChannelOutputStream.java
index e861791..239e061 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/ChannelOutputStream.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/ChannelOutputStream.java
@@ -40,6 +40,7 @@ public class ChannelOutputStream extends OutputStream {
   private ByteBuf buf;
   private byte[] singleByte = new byte[1];
   private boolean closed = false;
+  private final Object channelWritabilityMonitor;
 
   private ChannelFutureListener listener = new ChannelFutureListener() {
 @Override
@@ -52,11 +53,12 @@ public class ChannelOutputStream extends OutputStream {
 }
   };
 
-  public ChannelOutputStream(ChannelHandlerContext chc, String id, int bufSize) {
+  public ChannelOutputStream(ChannelHandlerContext chc, String id, int bufSize, final Object monitor) {
 this.chc = chc;
 this.id = id;
 this.bufSize = bufSize;
 this.buf = chc.alloc().buffer(bufSize);
+this.channelWritabilityMonitor = monitor;
   }
 
   @Override
@@ -124,8 +126,21 @@ public class ChannelOutputStream extends OutputStream {
   throw new IOException("Already closed: " + id);
 }
 
-chc.write(buf.copy()).addListener(listener);
+chc.writeAndFlush(buf.copy()).addListener(listener);
 buf.clear();
+
+// if underlying channel is not writable (perhaps because of slow consumer) wait for
+// notification about writable state change
+synchronized (channelWritabilityMonitor) {
+  // to prevent spurious wake up
+  while (!chc.channel().isWritable()) {
+try {
+  channelWritabilityMonitor.wait();
+} catch (InterruptedException e) {
+  throw new IOException("Interrupted when waiting for channel writability state change", e);
+}
+  }
+}
   }
 
   private void writeInternal(byte[] b, int off, int len) throws IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf23f6c/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java
index 151a31f..825488f 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java
@@ -166,6 +166,7 @@ public class LlapOutputFormatService {
   protected class LlapOutputFormatServiceHandler
 extends SimpleChannelInboundHandler {
 private final int sendBufferSize;
+private final Object channelWritabilityMonitor = new Object();
 public LlapOutputFormatServiceHandler(final int sendBufferSize) {
   this.sendBufferSize = sendBufferSize;
 }
@@ -195,7 +196,7 @@ public class LlapOutputFormatService {
   LOG.debug("registering socket for: " + id);
   @SuppressWarnings("rawtypes")
   LlapRecordWriter writer = new LlapRecordWriter(
-  new ChannelOutputStream(ctx, id, sendBufferSize));
+  new ChannelOutputStream(ctx, id, sendBufferSize, channelWritabilityMonitor));
   boolean isFailed = true;
   synchronized (lock) {
 if (!writers.containsKey(id)) {
@@ -221,6 +222,14 @@ public class LlapOutputFormatService {
   }
   LOG.error(error);
 }
+
+@Override
+public void channelWritabilityChanged(final ChannelHandlerContext ctx) throws Exception {
+  super.channelWritabilityChanged(ctx);
+  synchronized (channelWritabilityMonitor) {
+channelWritabilityMonitor.notifyAll();
+  }
+}
   }
 
   protected class LlapOutputFormatChannelCloseListener imple
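
The core of the fix is a wait/notify handshake: after each flush the output stream blocks on a shared monitor while the Netty channel reports it is not writable, and channelWritabilityChanged() wakes it up. A self-contained sketch of that handshake with illustrative names (no Netty dependency):

  public class WritabilityGate {
    private final Object monitor = new Object();
    private boolean writable = true;

    // Called by the writer after each flush; blocks until the channel is writable.
    public void awaitWritable() throws InterruptedException {
      synchronized (monitor) {
        while (!writable) {   // loop, not if: guards against spurious wakeups
          monitor.wait();
        }
      }
    }

    // Called from the writability-changed callback to wake any blocked writer.
    public void onWritabilityChanged(boolean nowWritable) {
      synchronized (monitor) {
        writable = nowWritable;
        monitor.notifyAll();
      }
    }
  }

The while loop mirrors the patch's "to prevent spurious wake up" comment, and notifyAll() mirrors channelWritabilityMonitor.notifyAll() in LlapOutputFormatServiceHandler.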

hive git commit: HIVE-13759: LlapTaskUmbilicalExternalClient should be closed by the record reader (Jason Dere, reviewed by Siddharth Seth)

2016-06-15 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master d43938ca1 -> dc4c66f6b


HIVE-13759: LlapTaskUmbilicalExternalClient should be closed by the record reader (Jason Dere, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc4c66f6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc4c66f6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc4c66f6

Branch: refs/heads/master
Commit: dc4c66f6babf0d52344d52e9af37cdfa2ed42be0
Parents: d43938c
Author: Jason Dere 
Authored: Wed Jun 15 10:57:17 2016 -0700
Committer: Jason Dere 
Committed: Wed Jun 15 10:57:17 2016 -0700

--
 .../hadoop/hive/llap/LlapBaseRecordReader.java  | 26 ++--
 .../ext/LlapTaskUmbilicalExternalClient.java|  3 ++-
 .../hadoop/hive/llap/LlapBaseInputFormat.java   | 25 +--
 .../hadoop/hive/llap/LlapRowInputFormat.java|  4 +--
 .../hadoop/hive/llap/TestLlapOutputFormat.java  |  2 +-
 5 files changed, 35 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dc4c66f6/llap-client/src/java/org/apache/hadoop/hive/llap/LlapBaseRecordReader.java
--
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/LlapBaseRecordReader.java b/llap-client/src/java/org/apache/hadoop/hive/llap/LlapBaseRecordReader.java
index 3c858a8..f2700c8 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/LlapBaseRecordReader.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/LlapBaseRecordReader.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.llap;
 
+import java.io.Closeable;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -52,14 +53,16 @@ public class LlapBaseRecordReader implements Recor
   protected Thread readerThread = null;
  protected final LinkedBlockingQueue readerEvents = new LinkedBlockingQueue();
   protected final long timeout;
+  protected final Closeable client;
 
-  public LlapBaseRecordReader(InputStream in, Schema schema, Class clazz, JobConf job) {
+  public LlapBaseRecordReader(InputStream in, Schema schema, Class clazz, JobConf job, Closeable client) {
 din = new DataInputStream(in);
 this.schema = schema;
 this.clazz = clazz;
 this.readerThread = Thread.currentThread();
 this.timeout = 3 * HiveConf.getTimeVar(job,
 HiveConf.ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
+this.client = client;
   }
 
   public Schema getSchema() {
@@ -68,7 +71,26 @@ public class LlapBaseRecordReader implements Recor
 
   @Override
   public void close() throws IOException {
-din.close();
+Exception caughtException = null;
+try {
+  din.close();
+} catch (Exception err) {
+  LOG.error("Error closing input stream:" + err.getMessage(), err);
+  caughtException = err;
+}
+
+if (client != null) {
+  try {
+client.close();
+  } catch (Exception err) {
+LOG.error("Error closing client:" + err.getMessage(), err);
+caughtException = (caughtException == null ? err : caughtException);
+  }
+}
+
+if (caughtException != null) {
+  throw new IOException("Exception during close: " + caughtException.getMessage(), caughtException);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/dc4c66f6/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
--
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
index 85943d2..5f250b4 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.hive.llap.ext;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -58,7 +59,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
-public class LlapTaskUmbilicalExternalClient extends AbstractService {
+public class LlapTaskUmbilicalExternalClient extends AbstractService implements Closeable {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapTaskUmbilicalExternalClient.class);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/dc4c66f6/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapBaseInputFormat.java
--
diff --git 
a/llap-ext-client/src/java/org/apache/hadoop/hive/ll
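
The new close() follows a common aggregation pattern: attempt every close, log each failure as it happens, remember the first exception, and rethrow it only after all resources have had a chance to close. A generic sketch of the pattern (a hypothetical helper, not Hive code):

  import java.io.Closeable;
  import java.io.IOException;

  public final class CloseAllHelper {
    // Close every resource; the first failure is rethrown after all closes run.
    static void closeAll(Closeable... resources) throws IOException {
      Exception first = null;
      for (Closeable c : resources) {
        if (c == null) continue;   // e.g. a record reader created without a client
        try {
          c.close();
        } catch (Exception err) {
          if (first == null) first = err;  // keep only the first failure
        }
      }
      if (first != null) {
        throw new IOException("Exception during close: " + first.getMessage(), first);
      }
    }
  }

Making LlapTaskUmbilicalExternalClient implement Closeable is what lets the record reader hold it as a plain Closeable and release it this way.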

hive git commit: HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 3eb16ebec -> 45c1775e1


HIVE-13833 : Add an initial delay when starting the heartbeat (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/45c1775e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/45c1775e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/45c1775e

Branch: refs/heads/branch-2.1
Commit: 45c1775e1b32234d576e7a474a372d9fa9326053
Parents: 3eb16eb
Author: Wei Zheng 
Authored: Tue Jun 14 15:30:56 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:27:48 2016 -0700

--
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java| 45 ++--
 1 file changed, 22 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/45c1775e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 9988eec..5b6f20c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.lockmgr;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,7 +83,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
   private static ScheduledExecutorService heartbeatExecutorService = null;
   private ScheduledFuture heartbeatTask = null;
   private Runnable shutdownRunner = null;
-  static final int SHUTDOWN_HOOK_PRIORITY = 0;
+  private static final int SHUTDOWN_HOOK_PRIORITY = 0;
 
   DbTxnManager() {
 shutdownRunner = new Runnable() {
@@ -161,10 +160,11 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 getLockManager();
 
 boolean atLeastOneLock = false;
+queryId = plan.getQueryId();
 
-LockRequestBuilder rqstBuilder = new LockRequestBuilder(plan.getQueryId());
+LockRequestBuilder rqstBuilder = new LockRequestBuilder(queryId);
 //link queryId to txnId
-LOG.info("Setting lock request transaction to " + JavaUtils.txnIdToString(txnId) + " for queryId=" + plan.getQueryId());
+LOG.info("Setting lock request transaction to " + JavaUtils.txnIdToString(txnId) + " for queryId=" + queryId);
 rqstBuilder.setTransactionId(txnId)
 .setUser(username);
 
@@ -304,7 +304,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 // Make sure we need locks.  It's possible there's nothing to lock in
 // this operation.
 if (!atLeastOneLock) {
-  LOG.debug("No locks needed for queryId" + plan.getQueryId());
+  LOG.debug("No locks needed for queryId" + queryId);
   return null;
 }
 
@@ -312,7 +312,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 if(isTxnOpen()) {
   statementId++;
 }
-LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), isBlocking, locks);
+LockState lockState = lockMgr.lock(rqstBuilder.build(), queryId, isBlocking, locks);
 ctx.setHiveLocks(locks);
 return lockState;
   }
@@ -324,15 +324,13 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 return t;
   }
   /**
-   * This is for testing only.
+   * This is for testing only. Normally client should call {@link #acquireLocks(QueryPlan, Context, String, boolean)}
* @param delay time to delay for first heartbeat
-   * @return null if no locks were needed
*/
   @VisibleForTesting
  void acquireLocksWithHeartbeatDelay(QueryPlan plan, Context ctx, String username, long delay) throws LockException {
 acquireLocks(plan, ctx, username, true);
 ctx.setHeartbeater(startHeartbeat(delay));
-queryId = plan.getQueryId();
   }
 
 
@@ -439,24 +437,25 @@ public class DbTxnManager extends HiveTxnManagerImpl {
 }
   }
 
-  private Heartbeater startHeartbeat() throws LockException {
-return startHeartbeat(0);
-  }
-
   /**
-   *  This is for testing only.  Normally client should call {@link #startHeartbeat()}
-   *  Make the heartbeater start before an initial delay period.
-   *  @param delay time to delay before first execution, in milliseconds
-   *  @return heartbeater
+   * Start the heartbeater threadpool and return the task.
+   * @param initialDelay time to delay before first execution, in milliseconds
+   * @return heartbeater
*/
-  Heartbeater startHeartbeat(long delay) throws LockException {
+  private Heartbeater st
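
The delay parameter maps directly onto java.util.concurrent scheduling: scheduleAtFixedRate takes an initial delay that is independent of the period, which is what allows the first heartbeat to be pushed past lock acquisition. A minimal sketch with illustrative values:

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import java.util.concurrent.TimeUnit;

  public class HeartbeatDemo {
    public static void main(String[] args) throws InterruptedException {
      ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
      long initialDelayMs = 2_000; // defer the first beat, as the patch does
      long periodMs = 500;         // subsequent beats at a fixed interval
      pool.scheduleAtFixedRate(
          () -> System.out.println("heartbeat @ " + System.currentTimeMillis()),
          initialDelayMs, periodMs, TimeUnit.MILLISECONDS);
      Thread.sleep(4_000);         // let a few beats fire, then stop
      pool.shutdown();
    }
  }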

hive git commit: HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 98465d9b9 -> 3eb16ebec


HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3eb16ebe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3eb16ebe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3eb16ebe

Branch: refs/heads/branch-2.1
Commit: 3eb16ebecc516790f9a766049c44611ee10b1949
Parents: 98465d9
Author: Wei Zheng 
Authored: Wed Jun 15 10:19:30 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:24:52 2016 -0700

--
 .../hive/ql/txn/compactor/CompactorMR.java  |   7 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 308 ++-
 2 files changed, 310 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3eb16ebe/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 37c5314..b54a95d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -250,10 +250,9 @@ public class CompactorMR {
   }
 }
 
-if (parsedDeltas.size() == 0) {
-  // Seriously, no deltas?  Can't compact that.
-  LOG.error(  "No delta files found to compact in " + sd.getLocation());
-  //couldn't someone want to run a Major compaction to convert old table to ACID?
+if (parsedDeltas.size() == 0 && dir.getOriginalFiles() == null) {
+  // Skip compaction if there's no delta files AND there's no original files
+  LOG.error("No delta files or original files found to compact in " + sd.getLocation());
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3eb16ebe/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index a1bd0fb..e76c925 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -270,10 +270,15 @@ public class TestTxnCommands2 {
 
   /**
* Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Insert a row to ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
* @throws Exception
*/
   @Test
-  public void testNonAcidToAcidConversionAndMajorCompaction() throws Exception {
+  public void testNonAcidToAcidConversion1() throws Exception {
 FileSystem fs = FileSystem.get(hiveConf);
 FileStatus[] status;
 
@@ -394,6 +399,307 @@ public class TestTxnCommands2 {
 Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
   }
 
+  /**
+   * Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Update the existing row in ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
+   * @throws Exception
+   */
+  @Test
+  public void testNonAcidToAcidConversion2() throws Exception {
+FileSystem fs = FileSystem.get(hiveConf);
+FileStatus[] status;
+
+// 1. Insert a row to Non-ACID table
+runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2)");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+(Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+// There should be 2 original bucket files in the location (00_0 and 01_0)
+Assert.assertEquals(BUCKET_COUNT, status.length);
+for (int i = 0; i < status.length; i++) {
+  Assert.assertTrue(status[i].getPath().getName().matches("0[01]_0"));
+}
+List rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
+int [][] resultData = new int[][] {{1, 2}};
+Assert.assertEquals(stringifyValues(resultData), rs);
+rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+int resultCount = 1;
+Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
+
+// 2. Convert NONACIDORCTBL to ACID table
+runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true')");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/
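
The behavioral change is all in the guard: previously any table without delta directories was skipped, which wrongly excluded freshly converted tables whose data still sits in pre-ACID "original" bucket files. A sketch of the new condition with illustrative types (the real code works against AcidUtils.Directory, as the diff shows):

  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;

  public class CompactionGuardDemo {
    // Skip only when there are neither deltas nor original files.
    static boolean shouldCompact(List<String> parsedDeltas, List<String> originalFiles) {
      if (parsedDeltas.isEmpty() && originalFiles == null) {
        System.err.println("No delta files or original files found to compact");
        return false;
      }
      return true; // deltas, originals, or both are present
    }

    public static void main(String[] args) {
      System.out.println(shouldCompact(Collections.emptyList(), null));                       // false: nothing to do
      System.out.println(shouldCompact(Collections.emptyList(), Arrays.asList("000000_0"))); // true: converted table
    }
  }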

hive git commit: HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 000fb2c2f -> 0b63145de


HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b63145d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b63145d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b63145d

Branch: refs/heads/branch-1
Commit: 0b63145dea924aa6ca305e9a68795d3b0e33e688
Parents: 000fb2c
Author: Wei Zheng 
Authored: Wed Jun 15 10:23:41 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:23:41 2016 -0700

--
 .../hive/ql/txn/compactor/CompactorMR.java  |   7 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 308 ++-
 2 files changed, 310 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0b63145d/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 3e7bb93..e7ea70f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -252,10 +252,9 @@ public class CompactorMR {
   }
 }
 
-if (parsedDeltas.size() == 0) {
-  // Seriously, no deltas?  Can't compact that.
-  LOG.error(  "No delta files found to compact in " + sd.getLocation());
-  //couldn't someone want to run a Major compaction to convert old table to ACID?
+if (parsedDeltas.size() == 0 && dir.getOriginalFiles() == null) {
+  // Skip compaction if there's no delta files AND there's no original files
+  LOG.error("No delta files or original files found to compact in " + sd.getLocation());
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0b63145d/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 6a944bc..b3b5bfd 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -270,10 +270,15 @@ public class TestTxnCommands2 {
 
   /**
* Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Insert a row to ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
* @throws Exception
*/
   @Test
-  public void testNonAcidToAcidConversionAndMajorCompaction() throws Exception {
+  public void testNonAcidToAcidConversion1() throws Exception {
 FileSystem fs = FileSystem.get(hiveConf);
 FileStatus[] status;
 
@@ -394,6 +399,307 @@ public class TestTxnCommands2 {
 Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
   }
 
+  /**
+   * Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Update the existing row in ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
+   * @throws Exception
+   */
+  @Test
+  public void testNonAcidToAcidConversion2() throws Exception {
+FileSystem fs = FileSystem.get(hiveConf);
+FileStatus[] status;
+
+// 1. Insert a row to Non-ACID table
+runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2)");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+(Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+// There should be 2 original bucket files in the location (00_0 and 01_0)
+Assert.assertEquals(BUCKET_COUNT, status.length);
+for (int i = 0; i < status.length; i++) {
+  Assert.assertTrue(status[i].getPath().getName().matches("0[01]_0"));
+}
+List rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
+int [][] resultData = new int[][] {{1, 2}};
+Assert.assertEquals(stringifyValues(resultData), rs);
+rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+int resultCount = 1;
+Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
+
+// 2. Convert NONACIDORCTBL to ACID table
+runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true')");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +

hive git commit: HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)

2016-06-15 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 53e01e4a5 -> d43938ca1


HIVE-13961 : ACID: Major compaction fails to include the original bucket files if there's no delta directory (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d43938ca
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d43938ca
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d43938ca

Branch: refs/heads/master
Commit: d43938ca127a849e50e8eaddd6313d409cea6770
Parents: 53e01e4
Author: Wei Zheng 
Authored: Wed Jun 15 10:19:30 2016 -0700
Committer: Wei Zheng 
Committed: Wed Jun 15 10:19:30 2016 -0700

--
 .../hive/ql/txn/compactor/CompactorMR.java  |   7 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 308 ++-
 2 files changed, 310 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d43938ca/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 71e69d5..6caca98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -254,10 +254,9 @@ public class CompactorMR {
   }
 }
 
-if (parsedDeltas.size() == 0) {
-  // Seriously, no deltas?  Can't compact that.
-  LOG.error(  "No delta files found to compact in " + sd.getLocation());
-  //couldn't someone want to run a Major compaction to convert old table to ACID?
+if (parsedDeltas.size() == 0 && dir.getOriginalFiles() == null) {
+  // Skip compaction if there's no delta files AND there's no original files
+  LOG.error("No delta files or original files found to compact in " + sd.getLocation());
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d43938ca/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
--
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index a1bd0fb..e76c925 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -270,10 +270,15 @@ public class TestTxnCommands2 {
 
   /**
* Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Insert a row to ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
* @throws Exception
*/
   @Test
-  public void testNonAcidToAcidConversionAndMajorCompaction() throws Exception {
+  public void testNonAcidToAcidConversion1() throws Exception {
 FileSystem fs = FileSystem.get(hiveConf);
 FileStatus[] status;
 
@@ -394,6 +399,307 @@ public class TestTxnCommands2 {
 Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
   }
 
+  /**
+   * Test the query correctness and directory layout after ACID table conversion and MAJOR compaction
+   * 1. Insert a row to Non-ACID table
+   * 2. Convert Non-ACID to ACID table
+   * 3. Update the existing row in ACID table
+   * 4. Perform Major compaction
+   * 5. Clean
+   * @throws Exception
+   */
+  @Test
+  public void testNonAcidToAcidConversion2() throws Exception {
+FileSystem fs = FileSystem.get(hiveConf);
+FileStatus[] status;
+
+// 1. Insert a row to Non-ACID table
+runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2)");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+(Table.NONACIDORCTBL).toString().toLowerCase()), FileUtils.STAGING_DIR_PATH_FILTER);
+// There should be 2 original bucket files in the location (00_0 and 01_0)
+Assert.assertEquals(BUCKET_COUNT, status.length);
+for (int i = 0; i < status.length; i++) {
+  Assert.assertTrue(status[i].getPath().getName().matches("0[01]_0"));
+}
+List rs = runStatementOnDriver("select a,b from " + Table.NONACIDORCTBL);
+int [][] resultData = new int[][] {{1, 2}};
+Assert.assertEquals(stringifyValues(resultData), rs);
+rs = runStatementOnDriver("select count(*) from " + Table.NONACIDORCTBL);
+int resultCount = 1;
+Assert.assertEquals(resultCount, Integer.parseInt(rs.get(0)));
+
+// 2. Convert NONACIDORCTBL to ACID table
+runStatementOnDriver("alter table " + Table.NONACIDORCTBL + " SET TBLPROPERTIES ('transactional'='true')");
+status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+   

hive git commit: HIVE-13987: Clarify current error shown when HS2 is down (Abdullah Yousufi, reviewed by Sergio Pena)

2016-06-15 Thread spena
Repository: hive
Updated Branches:
  refs/heads/master 113df6ba7 -> 53e01e4a5


HIVE-13987: Clarify current error shown when HS2 is down (Abdullah Yousufi, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/53e01e4a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/53e01e4a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/53e01e4a

Branch: refs/heads/master
Commit: 53e01e4a5cd88fe04fa67031b8c3183084b287cf
Parents: 113df6b
Author: Abdullah Yousufi 
Authored: Wed Jun 15 10:29:56 2016 -0500
Committer: Sergio Pena 
Committed: Wed Jun 15 10:29:56 2016 -0500

--
 beeline/src/java/org/apache/hive/beeline/BeeLine.java | 5 +
 beeline/src/main/resources/BeeLine.properties | 2 ++
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/53e01e4a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 734eeb8..ba7da70 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hive.beeline.cli.CliOptionsProcessor;
+import org.apache.thrift.transport.TTransportException;
 
 /**
  * A console SQL shell with command completion.
@@ -1745,6 +1746,10 @@ public class BeeLine implements Closeable {
   return;
 }
 
+if (e.getCause() instanceof TTransportException) {
+  error(loc("hs2-unavailable"));
+}
+
 error(loc(e instanceof SQLWarning ? "Warning" : "Error",
 new Object[] {
 e.getMessage() == null ? "" : e.getMessage().trim(),

http://git-wip-us.apache.org/repos/asf/hive/blob/53e01e4a/beeline/src/main/resources/BeeLine.properties
--
diff --git a/beeline/src/main/resources/BeeLine.properties b/beeline/src/main/resources/BeeLine.properties
index e940a7d..d85ef65 100644
--- a/beeline/src/main/resources/BeeLine.properties
+++ b/beeline/src/main/resources/BeeLine.properties
@@ -142,6 +142,8 @@ active-connections: 0#No active connections|1#{0} active connection:|1<{0} activ
 
 time-ms: ({0,number,#.###} seconds)
 
+hs2-unavailable: HS2 may be unavailable, check server status
+
 cmd-usage: Usage: java org.apache.hive.cli.beeline.BeeLine \n \
 \  -uthe JDBC URL to connect to\n \
\  -r  reconnect to last saved connect url (in conjunction with !save)\n \
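
The Java side of the change is a plain cause inspection: when the exception surfaced to the error handler has a Thrift TTransportException as its direct cause, Beeline prints the friendlier hs2-unavailable hint before the raw error. A minimal sketch of the same check, using IOException as a stand-in for TTransportException to stay dependency-free:

  import java.io.IOException;
  import java.sql.SQLException;

  public class CauseCheckDemo {
    public static void main(String[] args) {
      SQLException e = new SQLException("Could not open connection",
          new IOException("Connection refused"));  // stand-in for TTransportException
      if (e.getCause() instanceof IOException) {
        System.err.println("HS2 may be unavailable, check server status");
      }
      System.err.println("Error: " + e.getMessage());
    }
  }

Note the check looks only at the direct cause, not the whole cause chain; a transport failure wrapped more deeply would not trigger the hint.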