hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/branch-1 5bae0ad45 -> 82068205a HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82068205 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82068205 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82068205 Branch: refs/heads/branch-1 Commit: 82068205a59ed4b6aeb2b353eb612ce0da73c5c2 Parents: 5bae0ad Author: Wei Zheng Authored: Thu Mar 24 22:38:39 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 22:38:39 2016 -0700 -- .../hive/hcatalog/streaming/HiveEndPoint.java | 11 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 6 +++ .../hive/ql/txn/compactor/CompactorThread.java | 5 +++ .../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +++- .../hadoop/hive/ql/txn/compactor/Worker.java| 7 +++ .../apache/hadoop/hive/ql/TestTxnCommands2.java | 46 6 files changed, 83 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java -- diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index b0bbd66..2e81bf8 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.streaming; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -342,6 +343,11 @@ public class HiveEndPoint { return null; } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { LOG.error("Error closing connection to " + endPt, e); } catch (InterruptedException e) { @@ -937,6 +943,11 @@ public class HiveEndPoint { } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username + "' on endPoint :" + endPt, e); http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 1e6e8a1..d861bc2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -272,6 +272,12 @@ public class Cleaner extends CompactorThread { return null; } }); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/82068205/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java -- diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index ae8865c..952b27a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements MetaStoreThread { return null; } }); + try { +FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { +LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); + } if (wrapper.size() == 1) {
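The change in all five files above is the same idiom: work runs under a per-request UserGroupInformation via doAs(), and afterwards FileSystem.closeAllForUGI(ugi) evicts the FileSystem instances Hadoop cached under that UGI. Without the eviction, every fresh UGI adds another entry to the process-wide cache and the service slowly leaks memory. A minimal, self-contained sketch of the idiom (the class and method names here are illustrative, not from the patch):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiScopedWork {
  public static void runAs(String user, final Configuration conf, final Path dir)
      throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(
        user, UserGroupInformation.getLoginUser());
    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws IOException {
          // FileSystem.get() caches the instance under a key that includes the UGI.
          FileSystem fs = FileSystem.get(conf);
          fs.listStatus(dir);
          return null;
        }
      });
    } finally {
      try {
        // Evict every FileSystem cached for this UGI; otherwise each new UGI
        // instance pins another cache entry for the life of the JVM.
        FileSystem.closeAllForUGI(ugi);
      } catch (IOException e) {
        // Cleanup failure must not mask the outcome of the real work.
      }
    }
  }
}

The finally block in the sketch is slightly stricter than the patch, which performs the eviction on the success path and logs any IOException; either way the cleanup should never throw past the work it follows.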
hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master 6bfec2e97 -> 4fabd038c HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4fabd038 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4fabd038 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4fabd038 Branch: refs/heads/master Commit: 4fabd038cf64b906a89726805958c43b97194291 Parents: 6bfec2e Author: Wei Zheng Authored: Thu Mar 24 22:18:32 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 22:18:32 2016 -0700 -- .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++-- .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++-- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 4c31a49..23b1b7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -275,8 +275,9 @@ public class Cleaner extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 98ebf53..abbe5d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -235,8 +235,8 @@ public class Initiator extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { -LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + -ci.getFullPartitionName()); +LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + +ci.getFullPartitionName(), exception); } return compactionType; } http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index e21ca27..6238e2b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -175,8 +175,9 @@ public class Worker extends CompactorThread { try { FileSystem.closeAllForUGI(ugi); } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + - ci.getFullPartitionName());} + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + + ci.getFullPartitionName(), exception); +} } txnHandler.markCompacted(ci); } catch 
(Exception e) {
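The addendum is an argument-order fix: the original commit concatenated the exception into LOG.error's second argument. With an SLF4J logger there is no {} placeholder for that argument and it is not a Throwable, so both the stack trace and the partition name were silently dropped from the output. Side by side, in a small sketch using the same identifiers as the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogOverloads {
  private static final Logger LOG = LoggerFactory.getLogger(LogOverloads.class);

  static void report(Object ugi, String partitionName, Exception exception) {
    // Broken (pre-addendum): the second argument is a String, not a Throwable,
    // and the message has no {} placeholder for it, so SLF4J discards it --
    // no stack trace, no partition name.
    LOG.error("Could not clean up file-system handles for UGI: " + ugi,
        exception + " for " + partitionName);

    // Fixed: all context lives in the message, the Throwable goes last, and
    // the logger prints the full stack trace.
    LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for "
        + partitionName, exception);
  }
}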
hive git commit: HIVE-12367 : Lock/unlock database should add current database to inputs and outputs of authz hook (Dapeng Sun via Ashutosh Chauhan)
Repository: hive Updated Branches: refs/heads/master a71edcf6a -> 6bfec2e97 HIVE-12367 : Lock/unlock database should add current database to inputs and outputs of authz hook (Dapeng Sun via Ashutosh Chauhan) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6bfec2e9 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6bfec2e9 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6bfec2e9 Branch: refs/heads/master Commit: 6bfec2e97c4e434646aa9aeffd98c9939313fa6e Parents: a71edcf Author: Dapeng Sun Authored: Tue Dec 15 19:39:00 2015 -0800 Committer: Ashutosh Chauhan Committed: Thu Mar 24 19:22:28 2016 -0700 -- .../java/org/apache/hadoop/hive/ql/Driver.java | 20 .../hive/ql/parse/DDLSemanticAnalyzer.java | 15 +-- .../clientnegative/dbtxnmgr_nodblock.q.out | 2 ++ .../clientnegative/dbtxnmgr_nodbunlock.q.out| 2 ++ .../lockneg_query_tbl_in_locked_db.q.out| 6 ++ .../lockneg_try_db_lock_conflict.q.out | 6 ++ .../lockneg_try_drop_locked_db.q.out| 4 .../lockneg_try_lock_db_in_use.q.out| 6 ++ 8 files changed, 59 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/6bfec2e9/ql/src/java/org/apache/hadoop/hive/ql/Driver.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index d7e4ac7..7276e31 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -1416,6 +1416,10 @@ public class Driver implements CommandProcessor { if (!checkConcurrency()) { return false; } +// Lock operations themselves don't require the lock. +if (isExplicitLockOperation()){ + return false; +} if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) { return true; } @@ -1438,6 +1442,22 @@ public class Driver implements CommandProcessor { return false; } + private boolean isExplicitLockOperation() { +HiveOperation currentOpt = plan.getOperation(); +if (currentOpt != null) { + switch (currentOpt) { + case LOCKDB: + case UNLOCKDB: + case LOCKTABLE: + case UNLOCKTABLE: +return true; + default: +return false; + } +} +return false; + } + private CommandProcessorResponse createProcessorResponse(int ret) { queryDisplay.setErrorMessage(errorMessage); return new CommandProcessorResponse(ret, errorMessage, SQLState, downstreamError); http://git-wip-us.apache.org/repos/asf/hive/blob/6bfec2e9/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 0c087ed..fe9b8cc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -2444,8 +2444,12 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { String dbName = unescapeIdentifier(ast.getChild(0).getText()); String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase()); -//inputs.add(new ReadEntity(dbName)); -//outputs.add(new WriteEntity(dbName)); +inputs.add(new ReadEntity(getDatabase(dbName))); +// Lock database operation is to acquire the lock explicitly, the operation +// itself doesn't need to be locked. Set the WriteEntity as WriteType: +// DDL_NO_LOCK here, otherwise it will conflict with Hive's transaction. 
+outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK)); + LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, HiveConf.getVar(conf, ConfVars.HIVEQUERYID)); lockDatabaseDesc.setQueryStr(ctx.getCmd()); @@ -2457,6 +2461,13 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer { private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); +inputs.add(new ReadEntity(getDatabase(dbName))); +// Unlock database operation is to release the lock explicitly, the +// operation itself don't need to be locked. Set the WriteEntity as +// WriteType: DDL_NO_LOCK here, otherwise it will conflict with +// Hive's transaction. +outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK)); + UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName); DDLWork work = new DDLWork(getIn
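Populating inputs/outputs is what makes the change visible to authorization and audit hooks: before it, a hook saw empty entity sets for LOCK/UNLOCK DATABASE and could not tell which database was affected. A hedged sketch of a hook consuming the new entities, assuming the classic semantic-analyzer hook interface of this era (the hook class is illustrative and would be registered via hive.semantic.analyzer.hook):

import java.io.Serializable;
import java.util.List;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class LockAuditHook extends AbstractSemanticAnalyzerHook {
  @Override
  public void postAnalyze(HiveSemanticAnalyzerHookContext context,
      List<Task<? extends Serializable>> rootTasks) throws SemanticException {
    // Before HIVE-12367 both sets were empty for LOCK/UNLOCK DATABASE.
    for (ReadEntity input : context.getInputs()) {
      System.out.println("input: " + input.getName());
    }
    for (WriteEntity output : context.getOutputs()) {
      // The entity carries WriteType.DDL_NO_LOCK, so the transaction manager
      // does not try to take a lock for the lock statement itself.
      System.out.println("output: " + output.getName() + " " + output.getWriteType());
    }
  }
}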
hive git commit: HIVE-9499 : hive.limit.query.max.table.partition makes queries fail on non-partitioned tables (Navis via Ashutosh Chauhan)
Repository: hive Updated Branches: refs/heads/master 3b6b56d70 -> a71edcf6a HIVE-9499 : hive.limit.query.max.table.partition makes queries fail on non-partitioned tables (Navis via Ashutosh Chauhan) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a71edcf6 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a71edcf6 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a71edcf6 Branch: refs/heads/master Commit: a71edcf6a5672452a8e00c2bad4f20cffced26d9 Parents: 3b6b56d Author: Navis Ryu Authored: Sun Feb 8 17:57:00 2015 -0800 Committer: Ashutosh Chauhan Committed: Thu Mar 24 19:09:47 2016 -0700 -- .../ql/optimizer/stats/annotation/StatsRulesProcFactory.java | 3 +-- .../java/org/apache/hadoop/hive/ql/parse/ParseContext.java | 5 + .../org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java| 8 ++-- 3 files changed, 12 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 4bcf6bf..c4fc5ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -105,8 +105,7 @@ public class StatsRulesProcFactory { Object... nodeOutputs) throws SemanticException { TableScanOperator tsop = (TableScanOperator) nd; AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; - PrunedPartitionList partList = - aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop); + PrunedPartitionList partList = aspCtx.getParseContext().getPrunedPartitions(tsop); Table table = tsop.getConf().getTableMetadata(); try { http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 4f784d1..95c254c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -466,6 +466,11 @@ public class ParseContext { this.fetchTask = fetchTask; } + public PrunedPartitionList getPrunedPartitions(TableScanOperator ts) + throws SemanticException { +return getPrunedPartitions(ts.getConf().getAlias(), ts); + } + public PrunedPartitionList getPrunedPartitions(String alias, TableScanOperator ts) throws SemanticException { PrunedPartitionList partsList = opToPartList.get(ts); http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index d9db1d5..adee14b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -10776,10 +10776,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { // check whether any of them break the limit for (Operator topOp : topOps.values()) { if (topOp instanceof TableScanOperator) { -if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) { +TableScanOperator tsOp = 
(TableScanOperator) topOp; +if (tsOp.getConf().getIsMetadataOnly()) { + continue; +} +PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp); +if (!parts.getSourceTable().isPartitioned()) { continue; } -PrunedPartitionList parts = pCtx.getOpToPartList().get(topOp); if (parts.getPartitions().size() > scanLimit) { throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, "" + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""
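The corrected logic only enforces hive.limit.query.max.table.partition once it knows the scan target is actually partitioned; previously the code pulled the pruned-partition list for every table scan, and a non-partitioned table produced the failure in the JIRA title. A self-contained sketch of the check's shape, using stand-in types (the real ones are TableScanOperator, PrunedPartitionList, and Table):

import java.util.Arrays;
import java.util.List;

public class PartitionScanLimit {
  /** Minimal stand-in for one table scan; the real logic lives in SemanticAnalyzer. */
  static final class Scan {
    final String table;
    final boolean partitioned;    // Table.isPartitioned()
    final boolean metadataOnly;   // TableScanDesc.getIsMetadataOnly()
    final int prunedPartitions;   // PrunedPartitionList.getPartitions().size()
    Scan(String table, boolean partitioned, boolean metadataOnly, int prunedPartitions) {
      this.table = table; this.partitioned = partitioned;
      this.metadataOnly = metadataOnly; this.prunedPartitions = prunedPartitions;
    }
  }

  static void enforceLimit(List<Scan> scans, int scanLimit) {
    for (Scan s : scans) {
      if (s.metadataOnly) continue;   // reads no partition data at all
      if (!s.partitioned) continue;   // HIVE-9499: the limit only applies to partitioned tables
      if (s.prunedPartitions > scanLimit) {
        throw new IllegalStateException("Query scans " + s.prunedPartitions
            + " partitions of " + s.table + ", limit is " + scanLimit);
      }
    }
  }

  public static void main(String[] args) {
    // Before the fix, the unpartitioned scan tripped over a missing pruned list.
    enforceLimit(Arrays.asList(new Scan("flat_table", false, false, 0),
        new Scan("part_table", true, false, 3)), 10);
  }
}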
[1/2] hive git commit: HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran reviewed by Sergey Shelukhin)
Repository: hive Updated Branches: refs/heads/master dfba1fb28 -> 3b6b56d70 http://git-wip-us.apache.org/repos/asf/hive/blob/3b6b56d7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java index 29b51ec..f4cfa53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils; import org.apache.orc.impl.StreamName; import org.apache.orc.StripeInformation; import org.apache.orc.impl.BufferChunk; -import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils.ByteBufferAllocatorPool; import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch; import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory; import org.apache.orc.OrcProto; @@ -103,8 +102,7 @@ class EncodedReaderImpl implements EncodedReader { private final List types; private final long rowIndexStride; private final DataCache cache; - private ByteBufferAllocatorPool pool; - private boolean isDebugTracingEnabled; + private boolean isTracingEnabled; public EncodedReaderImpl(Object fileKey, List types, CompressionCodec codec, int bufferSize, long strideRate, DataCache cache, DataReader dataReader, PoolFactory pf) @@ -209,8 +207,8 @@ class EncodedReaderImpl implements EncodedReader { long offset = 0; // Stream offset in relation to the stripe. // 1.1. Figure out which columns have a present stream boolean[] hasNull = RecordReaderUtils.findPresentStreamsByColumn(streamList, types); -if (isDebugTracingEnabled) { - LOG.info("The following columns have PRESENT streams: " + arrayToString(hasNull)); +if (isTracingEnabled) { + LOG.trace("The following columns have PRESENT streams: " + arrayToString(hasNull)); } // We assume stream list is sorted by column and that non-data @@ -230,8 +228,8 @@ class EncodedReaderImpl implements EncodedReader { // We have a stream for included column, but in future it might have no data streams. // It's more like "has at least one column included that has an index stream". 
hasIndexOnlyCols = hasIndexOnlyCols | included[colIx]; -if (isDebugTracingEnabled) { - LOG.info("Skipping stream: " + streamKind + " at " + offset + ", " + length); +if (isTracingEnabled) { + LOG.trace("Skipping stream: " + streamKind + " at " + offset + ", " + length); } offset += length; continue; @@ -244,8 +242,8 @@ class EncodedReaderImpl implements EncodedReader { includedRgs = colRgs[colRgIx]; ctx = colCtxs[colRgIx] = new ColumnReadContext( colIx, encodings.get(colIx), indexes[colIx]); -if (isDebugTracingEnabled) { - LOG.info("Creating context " + colRgIx + " for column " + colIx + ":" + ctx.toString()); +if (isTracingEnabled) { + LOG.trace("Creating context " + colRgIx + " for column " + colIx + ":" + ctx.toString()); } } else { ctx = colCtxs[colRgIx]; @@ -254,14 +252,14 @@ class EncodedReaderImpl implements EncodedReader { int indexIx = RecordReaderUtils.getIndexPosition(ctx.encoding.getKind(), types.get(colIx).getKind(), streamKind, isCompressed, hasNull[colIx]); ctx.addStream(offset, stream, indexIx); - if (isDebugTracingEnabled) { -LOG.info("Adding stream for column " + colIx + ": " + streamKind + " at " + offset + if (isTracingEnabled) { +LOG.trace("Adding stream for column " + colIx + ": " + streamKind + " at " + offset + ", " + length + ", index position " + indexIx); } if (includedRgs == null || RecordReaderUtils.isDictionary(streamKind, encodings.get(colIx))) { RecordReaderUtils.addEntireStreamToRanges(offset, length, listToRead, true); -if (isDebugTracingEnabled) { - LOG.info("Will read whole stream " + streamKind + "; added to " + listToRead.getTail()); +if (isTracingEnabled) { + LOG.trace("Will read whole stream " + streamKind + "; added to " + listToRead.getTail()); } } else { RecordReaderUtils.addRgFilteredStreamToRanges(stream, includedRgs, @@ -287,15 +285,15 @@ class EncodedReaderImpl implements EncodedReader { // 2. Now, read all of the ranges from cache or disk. DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead.get()); -if (isDebugTracingEnabled && LOG.isInfoEnabled()) { - LOG.info("Resulting disk ranges to read (file " + fileKey + "): " +if (isTracingEnabled && LOG.isInfoEnabled()) { + LOG.trace("Resultin
[2/2] hive git commit: HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran reviewed by Sergey Shelukhin)
HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran reviewed by Sergey Shelukhin) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3b6b56d7 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3b6b56d7 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3b6b56d7 Branch: refs/heads/master Commit: 3b6b56d7000ee1d80c0f191611968d4249f311d7 Parents: dfba1fb Author: Prasanth Jayachandran Authored: Thu Mar 24 20:49:30 2016 -0500 Committer: Prasanth Jayachandran Committed: Thu Mar 24 20:49:30 2016 -0500 -- .../llap/IncrementalObjectSizeEstimator.java| 54 ++--- .../hadoop/hive/llap/cache/LlapDataBuffer.java | 12 +- .../hive/llap/cache/LowLevelCacheImpl.java | 35 ++- .../llap/cache/LowLevelCacheMemoryManager.java | 6 +- .../llap/cache/LowLevelFifoCachePolicy.java | 4 +- .../llap/cache/LowLevelLrfuCachePolicy.java | 14 +- .../hive/llap/cache/SimpleBufferManager.java| 8 +- .../hive/llap/io/api/impl/LlapInputFormat.java | 32 +-- .../hive/llap/io/api/impl/LlapIoImpl.java | 21 +- .../llap/io/decode/OrcColumnVectorProducer.java | 4 +- .../llap/io/encoded/OrcEncodedDataReader.java | 95 +++- .../hadoop/hive/llap/old/BufferInProgress.java | 82 --- .../apache/hadoop/hive/llap/old/BufferPool.java | 225 -- .../hadoop/hive/llap/old/CachePolicy.java | 34 --- .../apache/hadoop/hive/llap/old/ChunkPool.java | 237 --- .../resources/llap-daemon-log4j2.properties | 14 +- .../org/apache/hadoop/hive/llap/DebugUtils.java | 43 .../org/apache/hadoop/hive/llap/LogLevels.java | 53 - .../ql/exec/vector/VectorGroupByOperator.java | 2 +- .../hive/ql/io/orc/encoded/EncodedReader.java | 2 +- .../ql/io/orc/encoded/EncodedReaderImpl.java| 131 +- 21 files changed, 192 insertions(+), 916 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/3b6b56d7/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java -- diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java index 7d68294..3efbcc2 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java @@ -186,9 +186,7 @@ public class IncrementalObjectSizeEstimator { fieldCol = (Collection)fieldObj; if (fieldCol.size() == 0) { fieldCol = null; -if (DebugUtils.isTraceEnabled()) { - LlapIoImpl.LOG.info("Empty collection " + field); -} +LlapIoImpl.LOG.trace("Empty collection {}", field); } } if (fieldCol != null) { @@ -219,9 +217,7 @@ public class IncrementalObjectSizeEstimator { fieldCol = (Map)fieldObj; if (fieldCol.size() == 0) { fieldCol = null; -if (DebugUtils.isTraceEnabled()) { - LlapIoImpl.LOG.info("Empty map " + field); -} +LlapIoImpl.LOG.trace("Empty map {}", field); } } if (fieldCol != null) { @@ -257,15 +253,11 @@ public class IncrementalObjectSizeEstimator { return new Class[] { (Class)types[0], (Class)types[1] }; } else { // TODO: we could try to get the declaring object and infer argument... stupid Java. -if (DebugUtils.isTraceEnabled()) { - LlapIoImpl.LOG.info("Cannot determine map type: " + field); -} +LlapIoImpl.LOG.trace("Cannot determine map type: {}", field); } } else { // TODO: we could try to get superclass or generic interfaces. 
- if (DebugUtils.isTraceEnabled()) { -LlapIoImpl.LOG.info("Non-parametrized map type: " + field); - } + LlapIoImpl.LOG.trace("Non-parametrized map type: {}", field); } return null; } @@ -279,15 +271,11 @@ public class IncrementalObjectSizeEstimator { return (Class)type; } else { // TODO: we could try to get the declaring object and infer argument... stupid Java. -if (DebugUtils.isTraceEnabled()) { - LlapIoImpl.LOG.info("Cannot determine collection type: " + field); -} +LlapIoImpl.LOG.trace("Cannot determine collection type: {}", field); } } else { // TODO: we could try to get superclass or generic interfaces. - if (DebugUtils.isTraceEnabled()) { -LlapIoImpl.LOG.info("Non-parametrized collection type: " + field); - } + LlapIoImpl.LOG.trace("Non-parametrized collection type: {}", field); } return null; } @@ -297,11 +285,7 @@ public class Increment
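Two patterns recur throughout this patch: the global DebugUtils.isTraceEnabled() switch is replaced by the logger's own level check (cached once per reader where the check sits on a hot path), and string concatenation gives way to SLF4J {} placeholders so the message is only built when TRACE is actually enabled. A condensed sketch of both, assuming an SLF4J logger (identifiers are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TraceLogging {
  private static final Logger LOG = LoggerFactory.getLogger(TraceLogging.class);

  // Cached once per instance so per-row code pays a single boolean test
  // instead of a logger-level lookup on the hot path.
  private final boolean isTracingEnabled = LOG.isTraceEnabled();

  void addStream(int colIx, String streamKind, long offset, long length) {
    if (isTracingEnabled) {
      // Concatenation is acceptable here because the branch already guards it.
      LOG.trace("Adding stream for column " + colIx + ": " + streamKind + " at " + offset);
    }
    // Elsewhere the patch drops the guard entirely and relies on placeholders:
    // the arguments are boxed, but the string is never built unless TRACE is on.
    LOG.trace("Skipping stream: {} at {}, {}", streamKind, offset, length);
  }
}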
hive git commit: HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)
Repository: hive
Updated Branches:
  refs/heads/master ab095f0bc -> dfba1fb28

HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dfba1fb2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dfba1fb2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dfba1fb2

Branch: refs/heads/master
Commit: dfba1fb280f82822c1c006a0961a3ce9a52b6a6d
Parents: ab095f0
Author: Prasanth Jayachandran
Authored: Thu Mar 24 20:09:14 2016 -0500
Committer: Prasanth Jayachandran
Committed: Thu Mar 24 20:10:26 2016 -0500

--
 data/files/alltypesorc3xcols | Bin 0 -> 1504592 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/dfba1fb2/data/files/alltypesorc3xcols
--
diff --git a/data/files/alltypesorc3xcols b/data/files/alltypesorc3xcols
new file mode 100644
index 000..e484873
Binary files /dev/null and b/data/files/alltypesorc3xcols differ
hive git commit: HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 f76c30384 -> 68c231c1b

HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68c231c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68c231c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68c231c1

Branch: refs/heads/branch-2.0
Commit: 68c231c1b6a4769c2719f02b7732d64791ff4364
Parents: f76c303
Author: Prasanth Jayachandran
Authored: Thu Mar 24 20:09:14 2016 -0500
Committer: Prasanth Jayachandran
Committed: Thu Mar 24 20:09:57 2016 -0500

--
 data/files/alltypesorc3xcols | Bin 0 -> 1504592 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/68c231c1/data/files/alltypesorc3xcols
--
diff --git a/data/files/alltypesorc3xcols b/data/files/alltypesorc3xcols
new file mode 100644
index 000..e484873
Binary files /dev/null and b/data/files/alltypesorc3xcols differ
hive git commit: HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)
Repository: hive
Updated Branches:
  refs/heads/branch-1 178708231 -> 5bae0ad45

HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5bae0ad4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5bae0ad4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5bae0ad4

Branch: refs/heads/branch-1
Commit: 5bae0ad4563cf8ee30118179f6010675d795355b
Parents: 1787082
Author: Prasanth Jayachandran
Authored: Thu Mar 24 20:09:14 2016 -0500
Committer: Prasanth Jayachandran
Committed: Thu Mar 24 20:09:14 2016 -0500

--
 data/files/alltypesorc3xcols | Bin 0 -> 1504592 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hive/blob/5bae0ad4/data/files/alltypesorc3xcols
--
diff --git a/data/files/alltypesorc3xcols b/data/files/alltypesorc3xcols
new file mode 100644
index 000..e484873
Binary files /dev/null and b/data/files/alltypesorc3xcols differ
hive git commit: HIVE-13008 - WebHcat DDL commands in secure mode NPE when default FileSystem doesn't support delegation tokens (Eugene Koifman, reviewed by Chris Nauroth, Thejas Nair)
Repository: hive Updated Branches: refs/heads/master f9d1b6ab7 -> ab095f0bc HIVE-13008 - WebHcat DDL commands in secure mode NPE when default FileSystem doesn't support delegation tokens (Eugene Koifman, reviewed by Chris Nauroth, Thejas Nair) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab095f0b Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab095f0b Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab095f0b Branch: refs/heads/master Commit: ab095f0bc24447ab73843a1ae23a32f7b6c4bd1a Parents: f9d1b6a Author: Eugene Koifman Authored: Thu Mar 24 18:03:32 2016 -0700 Committer: Eugene Koifman Committed: Thu Mar 24 18:03:32 2016 -0700 -- .../hcatalog/templeton/SecureProxySupport.java | 46 ++-- 1 file changed, 33 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/ab095f0b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java -- diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java index 2ac62c0..13f3c9b 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java @@ -20,10 +20,14 @@ package org.apache.hive.hcatalog.templeton; import java.io.File; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; +import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.commons.lang3.ArrayUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -79,7 +83,7 @@ public class SecureProxySupport { this.user = user; File t = File.createTempFile("templeton", null); tokenPath = new Path(t.toURI()); - Token fsToken = getFSDelegationToken(user, conf); + Token[] fsToken = getFSDelegationToken(user, conf); String hcatTokenStr; try { hcatTokenStr = buildHcatDelegationToken(user); @@ -130,11 +134,11 @@ public class SecureProxySupport { } } - class TokenWrapper { -Token token; + private static class TokenWrapper { +Token[] tokens = new Token[0]; } - private Token getFSDelegationToken(String user, + private Token[] getFSDelegationToken(String user, final Configuration conf) throws IOException, InterruptedException { LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName()); @@ -142,18 +146,32 @@ public class SecureProxySupport { final TokenWrapper twrapper = new TokenWrapper(); ugi.doAs(new PrivilegedExceptionAction() { - public Object run() throws IOException { -FileSystem fs = FileSystem.get(conf); -//todo: according to JavaDoc this seems like private API: addDelegationToken should be used -twrapper.token = fs.getDelegationToken(ugi.getShortUserName()); + public Object run() throws IOException, URISyntaxException { +Credentials creds = new Credentials(); +//get Tokens for default FS. Not all FSs support delegation tokens, e.g. 
WASB +collectTokens(FileSystem.get(conf), twrapper, creds, ugi.getShortUserName()); +//get tokens for all other known FSs since Hive tables may result in different ones +//passing "creds" prevents duplicate tokens from being added +Collection URIs = conf.getStringCollection("mapreduce.job.hdfs-servers"); +for(String uri : URIs) { + LOG.debug("Getting tokens for " + uri); + collectTokens(FileSystem.get(new URI(uri), conf), twrapper, creds, ugi.getShortUserName()); +} return null; } }); -return twrapper.token; - +return twrapper.tokens; } - - private void writeProxyDelegationTokens(final Token fsToken, + private static void collectTokens(FileSystem fs, TokenWrapper twrapper, Credentials creds, String userName) throws IOException { +Token[] tokens = fs.addDelegationTokens(userName, creds); +if(tokens != null && tokens.length > 0) { + twrapper.tokens = ArrayUtils.addAll(twrapper.tokens, tokens); +} + } + /** + * @param fsTokens not null + */ + private void writeProxyDelegationTokens(final Token fsTokens[], final Token msToken, final Configuration conf, String user, @@ -168,7 +186,9 @@ public class SecureProxySupport { ugi.doAs(new PrivilegedExceptio
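The heart of this fix is FileSystem.addDelegationTokens(renewer, credentials): unlike the private getDelegationToken call it replaces, it is the supported API, it returns zero tokens for filesystems that do not issue them (such as WASB) instead of failing, and the shared Credentials object deduplicates tokens across filesystems. A standalone sketch of the collection loop, assuming a Kerberos login and that mapreduce.job.hdfs-servers lists any additional filesystems (names follow the patch but are illustrative here):

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenCollector {
  /** Gathers delegation tokens for the default FS plus any extra ones. */
  public static List<Token<?>> collect(Configuration conf, String renewer) throws Exception {
    Credentials creds = new Credentials();   // dedupes tokens across filesystems
    List<Token<?>> all = new ArrayList<Token<?>>();
    addTokens(FileSystem.get(conf), renewer, creds, all);
    for (String uri : conf.getStringCollection("mapreduce.job.hdfs-servers")) {
      addTokens(FileSystem.get(new URI(uri), conf), renewer, creds, all);
    }
    return all;
  }

  private static void addTokens(FileSystem fs, String renewer, Credentials creds,
      List<Token<?>> all) throws Exception {
    // Returns only the tokens newly added to creds; filesystems without
    // delegation-token support (e.g. WASB) simply contribute nothing.
    Token<?>[] tokens = fs.addDelegationTokens(renewer, creds);
    if (tokens != null) {
      for (Token<?> t : tokens) {
        all.add(t);
      }
    }
  }
}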
hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)
Repository: hive Updated Branches: refs/heads/master d3a5f20b4 -> f9d1b6ab7 HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f9d1b6ab Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f9d1b6ab Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f9d1b6ab Branch: refs/heads/master Commit: f9d1b6ab77ab15b8337c17fbe38557c1f7b5ce58 Parents: d3a5f20 Author: Wei Zheng Authored: Thu Mar 24 17:29:59 2016 -0700 Committer: Wei Zheng Committed: Thu Mar 24 17:29:59 2016 -0700 -- .../hive/hcatalog/streaming/HiveEndPoint.java | 11 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 5 +++ .../hive/ql/txn/compactor/CompactorThread.java | 5 +++ .../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +++- .../hadoop/hive/ql/txn/compactor/Worker.java| 8 +++- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 6 files changed, 82 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java -- diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index 4c77842..baeafad 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -18,6 +18,7 @@ package org.apache.hive.hcatalog.streaming; +import org.apache.hadoop.fs.FileSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.cli.CliSessionState; @@ -342,6 +343,11 @@ public class HiveEndPoint { return null; } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { LOG.error("Error closing connection to " + endPt, e); } catch (InterruptedException e) { @@ -937,6 +943,11 @@ public class HiveEndPoint { } } ); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); +} } catch (IOException e) { throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username + "' on endPoint :" + endPt, e); http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 9ffeaec..4c31a49 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -272,6 +272,11 @@ public class Cleaner extends CompactorThread { return null; } }); +try { + FileSystem.closeAllForUGI(ugi); +} catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception + " for " + + ci.getFullPartitionName());} } txnHandler.markCleaned(ci); } catch (Exception e) { http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index 
8495c66..4d6e24e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements MetaStoreThread { return null; } }); + try { +FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { +LOG.error("Could not clean up file-system handles for UGI: " + ugi, exception); + } if (wrapper.size() == 1) { LOG.debug("Running job as " + wrapper.get(0)); http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/ap
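This is the master version of the branch-1 commit shown earlier; the new TestTxnCommands2 coverage asserts that the FileSystem cache stays flat across transactional operations. Hadoop exposes no public API for inspecting the cache, so a test has to peek via reflection; a sketch of such a probe, with the caveat that CACHE and map are internal Hadoop field names that may differ across versions:

import java.lang.reflect.Field;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheProbe {
  /** Returns the number of cached FileSystem instances, via reflection. */
  @SuppressWarnings("rawtypes")
  public static int cacheSize() throws Exception {
    // FileSystem.CACHE is not public; its "map" field holds one entry per
    // (scheme, authority, UGI) key. Both names are internal details.
    Field cacheField = FileSystem.class.getDeclaredField("CACHE");
    cacheField.setAccessible(true);
    Object cache = cacheField.get(null);
    Field mapField = cache.getClass().getDeclaredField("map");
    mapField.setAccessible(true);
    return ((Map) mapField.get(cache)).size();
  }
}

A test can record cacheSize() before a compaction cycle, run the cycle, and assert the size is unchanged: with the closeAllForUGI calls above, each per-request UGI's entry is evicted as soon as its work completes.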
[3/3] hive git commit: HIVE-11388 - Allow ACID Compactor components to run in multiple metastores (Eugene Koifman, reviewed by Wei Zheng)
HIVE-11388 - Allow ACID Compactor components to run in multiple metastores (Eugene Koifman, reviewed by Wei Zheng) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/17870823 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/17870823 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/17870823 Branch: refs/heads/branch-1 Commit: 178708231e09bb2c08aa05cf9979efd6d3cd542c Parents: c829505 Author: Eugene Koifman Authored: Thu Mar 24 16:22:21 2016 -0700 Committer: Eugene Koifman Committed: Thu Mar 24 16:22:21 2016 -0700 -- .../apache/hadoop/hive/common/ServerUtils.java | 14 ++ .../deployers/config/hive/hive-site.mysql.xml | 24 ++- .../hive/metastore/txn/CompactionInfo.java | 4 + .../metastore/txn/CompactionTxnHandler.java | 7 +- .../hadoop/hive/metastore/txn/TxnDbUtil.java| 20 ++- .../hadoop/hive/metastore/txn/TxnHandler.java | 169 ++- .../hadoop/hive/metastore/txn/TxnStore.java | 33 +++- .../hadoop/hive/metastore/txn/TxnUtils.java | 4 +- .../metastore/txn/ValidCompactorTxnList.java| 2 +- .../hive/metastore/txn/TestTxnHandler.java | 93 ++ .../ql/txn/AcidCompactionHistoryService.java| 7 + .../hive/ql/txn/AcidHouseKeeperService.java | 7 + .../hadoop/hive/ql/txn/compactor/Cleaner.java | 62 ++- .../hadoop/hive/ql/txn/compactor/Initiator.java | 19 ++- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 7 +- .../hive/ql/lockmgr/TestDbTxnManager.java | 6 + 16 files changed, 445 insertions(+), 33 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/17870823/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java -- diff --git a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java index a284f18..4141770 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java @@ -24,6 +24,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import java.net.InetAddress; +import java.net.UnknownHostException; + /** * ServerUtils (specific to HiveServer version 1) */ @@ -47,4 +50,15 @@ public class ServerUtils { } } + /** + * @return name of current host + */ + public static String hostname() { +try { + return InetAddress.getLocalHost().getHostName(); +} catch (UnknownHostException e) { + LOG.error("Unable to resolve my host name " + e.getMessage()); + throw new RuntimeException(e); +} + } } http://git-wip-us.apache.org/repos/asf/hive/blob/17870823/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml -- diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml index b6f1ab7..387da6c 100644 --- a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml +++ b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml @@ -62,13 +62,14 @@ hive.exec.dynamic.partition.mode nonstrict + hive.compactor.initiator.on -false +true hive.compactor.worker.threads -2 +5 hive.timedout.txn.reaper.start @@ -81,9 +82,24 @@ --> hive.timedout.txn.reaper.interval -30s +1s + + +hive.compactor.history.reaper.interval +1s + + +hive.compactor.cleaner.run.interval +1s + + +hive.compactor.check.interval +1s + + +hive.compactor.delta.num.threshold +2 -
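Letting the compactor's Initiator and Cleaner run in several metastores concurrently means each instance must identify itself when it claims or marks work in the compaction queue, which is what the new ServerUtils.hostname() supports. A sketch of how a cluster-unique worker identity can be built from it (the name format is modeled on what the compactor threads do, but treat the details as illustrative):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class WorkerIdentity {
  /** Resolves the local host name, failing fast if it cannot be determined. */
  public static String hostname() {
    try {
      return InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
      throw new RuntimeException("Unable to resolve my host name " + e.getMessage(), e);
    }
  }

  public static void main(String[] args) {
    // host + thread id is unique both across metastores and across the worker
    // threads inside one metastore, so the queue can record who owns a job.
    String workerName = hostname() + "-" + Thread.currentThread().getId();
    System.out.println("claiming compaction requests as " + workerName);
  }
}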
[2/3] hive git commit: HIVE-13344 - port HIVE-12902 to 1.x line (Eugene Koifman, reviewed by Wei Zheng)
HIVE-13344 - port HIVE-12902 to 1.x line (Eugene Koifman, reviewed by Wei Zheng) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c8295051 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c8295051 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c8295051 Branch: refs/heads/branch-1 Commit: c8295051cc26577dcc1eb17709d4ffc0f9784c5b Parents: db2efe4 Author: Eugene Koifman Authored: Thu Mar 24 16:21:07 2016 -0700 Committer: Eugene Koifman Committed: Thu Mar 24 16:21:07 2016 -0700 -- .../org/apache/hadoop/hive/conf/HiveConf.java | 6 +- .../hive/ql/txn/compactor/TestCompactor.java| 25 +- .../hive/metastore/AcidEventListener.java | 38 +- .../hadoop/hive/metastore/HiveMetaStore.java| 14 +- .../hive/metastore/HiveMetaStoreClient.java | 6 +- .../metastore/txn/CompactionTxnHandler.java | 47 +-- .../hadoop/hive/metastore/txn/TxnHandler.java | 127 +-- .../hadoop/hive/metastore/txn/TxnStore.java | 364 +++ .../hadoop/hive/metastore/txn/TxnUtils.java | 209 +++ .../metastore/txn/TestCompactionTxnHandler.java | 5 +- .../hive/metastore/txn/TestTxnHandler.java | 183 +- .../metastore/txn/TestTxnHandlerNegative.java | 2 +- .../ql/txn/AcidCompactionHistoryService.java| 16 +- .../hive/ql/txn/AcidHouseKeeperService.java | 13 +- .../hive/ql/txn/compactor/CompactorThread.java | 7 +- .../hadoop/hive/ql/txn/compactor/Initiator.java | 8 +- .../hadoop/hive/ql/txn/compactor/Worker.java| 6 +- .../apache/hadoop/hive/ql/TestTxnCommands2.java | 22 +- .../hive/ql/lockmgr/TestDbTxnManager.java | 12 +- .../hive/ql/txn/compactor/CompactorTest.java| 34 +- .../hive/ql/txn/compactor/TestCleaner.java | 20 +- .../hive/ql/txn/compactor/TestInitiator.java| 7 +- .../hive/ql/txn/compactor/TestWorker.java | 13 +- 23 files changed, 847 insertions(+), 337 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/c8295051/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java -- diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index b78bea2..f84c940 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -39,6 +39,7 @@ import java.util.regex.Pattern; import javax.security.auth.login.LoginException; +import com.google.common.base.Joiner; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -56,7 +57,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; import org.apache.hive.common.HiveCompat; -import com.google.common.base.Joiner; /** * Hive Configuration. @@ -607,6 +607,10 @@ public class HiveConf extends Configuration { METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore", "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" + "This class is used to store and retrieval of raw metadata objects such as table, database"), +METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl", +"org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler", +"Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. 
This " + +"class is used to store and retrieve transactions and locks"), METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver", "Driver class name for a JDBC metastore"), METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass", http://git-wip-us.apache.org/repos/asf/hive/blob/c8295051/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java -- diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 9c0f374..37bbab8 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -8,7 +8,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveM
[1/3] hive git commit: HIVE-13344 - port HIVE-12902 to 1.x line (Eugene Koifman, reviewed by Wei Zheng)
Repository: hive Updated Branches: refs/heads/branch-1 db2efe42a -> 178708231 http://git-wip-us.apache.org/repos/asf/hive/blob/c8295051/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java -- diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index 5545574..cac4623 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -20,15 +20,31 @@ package org.apache.hadoop.hive.ql.txn.compactor; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreThread; -import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -39,7 +55,11 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.mapred.*; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.RecordWriter; +import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Progressable; import org.apache.thrift.TException; @@ -62,7 +82,7 @@ public abstract class CompactorTest { static final private String CLASS_NAME = CompactorTest.class.getName(); static final private Log LOG = LogFactory.getLog(CLASS_NAME); - protected CompactionTxnHandler txnHandler; + protected TxnStore txnHandler; protected IMetaStoreClient ms; protected long sleepTime = 1000; protected HiveConf conf; @@ -75,7 +95,7 @@ public abstract class CompactorTest { TxnDbUtil.setConfValues(conf); TxnDbUtil.cleanDb(); ms = new HiveMetaStoreClient(conf); -txnHandler = new 
CompactionTxnHandler(conf); +txnHandler = TxnUtils.getTxnStore(conf); tmpdir = new File(System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") + "compactor_test_tables"); tmpdir.mkdir(); http://git-wip-us.apache.org/repos/asf/hive/blob/c8295051/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java -- diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index 913c8bc..17634f0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.ql.txn.compactor; -import org.apache.hadoop.hive.metastore.txn.TxnHandler; import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -25,6 +24,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.junit.Test; import java.util.Ar
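Both parts of HIVE-13344 funnel through one seam: callers no longer construct CompactionTxnHandler directly but ask TxnUtils.getTxnStore(conf) for whatever TxnStore implementation hive.metastore.txn.store.impl names, mirroring the existing hive.metastore.rawstore.impl mechanism. A generic sketch of that reflective-factory pattern (the helper class is illustrative, not the actual TxnUtils code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class StoreFactory {
  /** Instantiates the configured implementation of the given interface. */
  public static <T> T newInstance(Configuration conf, String key,
      String defaultImpl, Class<T> iface) {
    String className = conf.get(key, defaultImpl);
    try {
      Class<? extends T> clazz = Class.forName(className).asSubclass(iface);
      // ReflectionUtils also pushes the Configuration into Configurable impls.
      return ReflectionUtils.newInstance(clazz, conf);
    } catch (ClassNotFoundException e) {
      throw new RuntimeException("Unable to instantiate " + className, e);
    }
  }
}

// Usage, mirroring what TxnUtils.getTxnStore(conf) does for the tests above:
//   TxnStore store = StoreFactory.newInstance(conf,
//       "hive.metastore.txn.store.impl",
//       "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
//       TxnStore.class);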
hive git commit: HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth Jayachandran reviewed by Gopal V)
Repository: hive Updated Branches: refs/heads/branch-1 8fc708b87 -> db2efe42a HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth Jayachandran reviewed by Gopal V) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db2efe42 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db2efe42 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db2efe42 Branch: refs/heads/branch-1 Commit: db2efe42af51f1556daab8bf4feb52111553a641 Parents: 8fc708b Author: Prasanth Jayachandran Authored: Thu Mar 24 13:30:55 2016 -0500 Committer: Prasanth Jayachandran Committed: Thu Mar 24 13:31:52 2016 -0500 -- .../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 15 --- 1 file changed, 12 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/db2efe42/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java index 44cac68..a21a298 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java @@ -385,9 +385,9 @@ class RecordReaderImpl implements RecordReader { } TruthValue result; +Object baseObj = predicate.getLiteral(); try { // Predicate object and stats objects are converted to the type of the predicate object. - Object baseObj = predicate.getLiteral(); Object minValue = getBaseObjectForComparison(predicate.getType(), min); Object maxValue = getBaseObjectForComparison(predicate.getType(), max); Object predObj = getBaseObjectForComparison(predicate.getType(), baseObj); @@ -399,8 +399,17 @@ class RecordReaderImpl implements RecordReader { // in case failed conversion, return the default YES_NO_NULL truth value } catch (Exception e) { if (LOG.isWarnEnabled()) { -LOG.warn("Exception when evaluating predicate. Skipping ORC PPD." + -" Exception: " + ExceptionUtils.getStackTrace(e)); +final String statsType = min == null ? +(max == null ? "null" : max.getClass().getSimpleName()) : +min.getClass().getSimpleName(); +final String predicateType = baseObj == null ? "null" : baseObj.getClass().getSimpleName(); +final String reason = e.getClass().getSimpleName() + " when evaluating predicate." + +" Skipping ORC PPD." + +" Exception: " + e.getMessage() + +" StatsType: " + statsType + +" PredicateType: " + predicateType; +LOG.warn(reason); +LOG.debug(reason, e); } if (predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS) || !hasNull) { result = TruthValue.YES_NO;
hive git commit: HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth Jayachandran reviewed by Gopal V)
Repository: hive Updated Branches: refs/heads/master d469e6110 -> d3a5f20b4 HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth Jayachandran reviewed by Gopal V) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d3a5f20b Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d3a5f20b Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d3a5f20b Branch: refs/heads/master Commit: d3a5f20b4487e241b3e9424d1d762dfca0c25d2f Parents: d469e61 Author: Prasanth Jayachandran Authored: Thu Mar 24 13:30:55 2016 -0500 Committer: Prasanth Jayachandran Committed: Thu Mar 24 13:31:08 2016 -0500 -- .../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 15 --- 1 file changed, 12 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hive/blob/d3a5f20b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java index d511df6..aa835ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java @@ -378,9 +378,9 @@ public class RecordReaderImpl implements RecordReader { } TruthValue result; +Object baseObj = predicate.getLiteral(); try { // Predicate object and stats objects are converted to the type of the predicate object. - Object baseObj = predicate.getLiteral(); Object minValue = getBaseObjectForComparison(predicate.getType(), min); Object maxValue = getBaseObjectForComparison(predicate.getType(), max); Object predObj = getBaseObjectForComparison(predicate.getType(), baseObj); @@ -392,8 +392,17 @@ public class RecordReaderImpl implements RecordReader { // in case failed conversion, return the default YES_NO_NULL truth value } catch (Exception e) { if (LOG.isWarnEnabled()) { -LOG.warn("Exception when evaluating predicate. Skipping ORC PPD." + -" Exception: " + ExceptionUtils.getStackTrace(e)); +final String statsType = min == null ? +(max == null ? "null" : max.getClass().getSimpleName()) : +min.getClass().getSimpleName(); +final String predicateType = baseObj == null ? "null" : baseObj.getClass().getSimpleName(); +final String reason = e.getClass().getSimpleName() + " when evaluating predicate." + +" Skipping ORC PPD." + +" Exception: " + e.getMessage() + +" StatsType: " + statsType + +" PredicateType: " + predicateType; +LOG.warn(reason); +LOG.debug(reason, e); } if (predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS) || !hasNull) { result = TruthValue.YES_NO;
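Both branches get the same logging discipline: an expected, recoverable failure (a stats/predicate type conversion that merely disables PPD for one predicate) is summarized in a single WARN line naming the exception class and the operand types, while the full stack trace moves down to DEBUG. A condensed sketch of the pattern, assuming an SLF4J logger (branch-1's commons-logging offers equivalent warn/debug overloads):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PpdLogging {
  private static final Logger LOG = LoggerFactory.getLogger(PpdLogging.class);

  /** Logs a recoverable predicate-evaluation failure without flooding WARN. */
  static void logSkippedPpd(Exception e, Object statsMin, Object predicateLiteral) {
    final String statsType =
        statsMin == null ? "null" : statsMin.getClass().getSimpleName();
    final String predicateType =
        predicateLiteral == null ? "null" : predicateLiteral.getClass().getSimpleName();
    final String reason = e.getClass().getSimpleName() + " when evaluating predicate."
        + " Skipping ORC PPD."
        + " Exception: " + e.getMessage()
        + " StatsType: " + statsType
        + " PredicateType: " + predicateType;
    LOG.warn(reason);      // one concise line per failure instead of a stack trace
    LOG.debug(reason, e);  // the full trace is still available when DEBUG is on
  }
}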
hive git commit: HIVE-12616 : NullPointerException when spark session is reused to run a mapjoin (Nemon Lou, via Szehon)
Repository: hive Updated Branches: refs/heads/master 219d3527c -> d469e6110 HIVE-12616 : NullPointerException when spark session is reused to run a mapjoin (Nemon Lou, via Szehon) Project: http://git-wip-us.apache.org/repos/asf/hive/repo Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d469e611 Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d469e611 Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d469e611 Branch: refs/heads/master Commit: d469e61108a1844fcc173674bfb2cd9f7ad01c18 Parents: 219d352 Author: Szehon Ho Authored: Thu Mar 24 11:12:08 2016 -0700 Committer: Szehon Ho Committed: Thu Mar 24 11:12:50 2016 -0700 -- .../apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java | 4 1 file changed, 4 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hive/blob/d469e611/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java -- diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java index 1798622..2427321 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java @@ -28,6 +28,7 @@ import java.util.Set; import org.apache.commons.compress.utils.CharsetNames; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hive.ql.session.SessionState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -120,6 +121,9 @@ public class HiveSparkClientFactory { sparkMaster = sparkConf.get("spark.master"); hiveConf.set("spark.master", sparkMaster); } +if (SessionState.get() != null && SessionState.get().getConf() != null) { + SessionState.get().getConf().set("spark.master", sparkMaster); +} if (sparkMaster.equals("yarn-cluster")) { sparkConf.put("spark.yarn.maxAppAttempts", "1"); }
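The guard added here writes the Spark client's resolved spark.master back into the session-level HiveConf, so a reused session sees the same value the client actually ran with instead of a stale or missing one. A minimal sketch of that write-back idiom (the helper is illustrative; the real code sits inline in HiveSparkClientFactory, as the diff shows):

import org.apache.hadoop.conf.Configuration;

public class ConfSync {
  /**
   * Copies the effective value of a key into a session-scoped conf so that a
   * reused session observes the same resolution the client performed.
   */
  static void propagate(Configuration resolved, Configuration session, String key) {
    String value = resolved.get(key);
    if (value != null && session != null) {
      session.set(key, value);
    }
  }
}

// In HiveSparkClientFactory the equivalent guard is:
//   if (SessionState.get() != null && SessionState.get().getConf() != null) {
//     SessionState.get().getConf().set("spark.master", sparkMaster);
//   }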
hive git commit: HIVE-13300 : Hive on spark throws exception for multi-insert with join (Szehon, reviewed by Xuefu and Chao Sun)
Repository: hive
Updated Branches:
  refs/heads/master e665f020b -> 219d3527c


HIVE-13300 : Hive on spark throws exception for multi-insert with join (Szehon, reviewed by Xuefu and Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/219d3527
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/219d3527
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/219d3527

Branch: refs/heads/master
Commit: 219d3527cfac09045f0ac247821746e7c95dcb8c
Parents: e665f02
Author: Szehon Ho
Authored: Thu Mar 24 11:08:04 2016 -0700
Committer: Szehon Ho
Committed: Thu Mar 24 11:09:10 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties | 1 +
 .../ql/exec/spark/SparkReduceRecordHandler.java | 2 +
 .../clientpositive/multi_insert_with_join.q | 29 +
 .../clientpositive/multi_insert_with_join.q.out | 128 +++
 .../spark/multi_insert_with_join.q.out | 128 +++
 5 files changed, 288 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 232e262..f8e8bda 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1006,6 +1006,7 @@ spark.query.files=add_part_multiple.q, \
   multi_insert_lateral_view.q, \
   multi_insert_mixed.q, \
   multi_insert_move_tasks_share_dependencies.q, \
+  multi_insert_with_join.q, \
   multi_join_union.q, \
   multi_join_union_src.q, \
   multigroupby_singlemr.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 439e0df..0d31e5f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -230,8 +230,10 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
     if (isTagged) {
       // remove the tag from key coming out of reducer
       // and store it in separate variable.
+      // make a copy for multi-insert with join case as Spark re-uses input key from same parent
       int size = keyWritable.getSize() - 1;
       tag = keyWritable.get()[size];
+      keyWritable = new BytesWritable(keyWritable.getBytes(), size);
       keyWritable.setSize(size);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/test/queries/clientpositive/multi_insert_with_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_with_join.q b/ql/src/test/queries/clientpositive/multi_insert_with_join.q
new file mode 100644
index 000..862dd9f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/multi_insert_with_join.q
@@ -0,0 +1,29 @@
+set hive.auto.convert.join=false;
+
+drop table if exists status_updates;
+drop table if exists profiles;
+drop table if exists school_summary;
+drop table if exists gender_summary;
+
+create table status_updates(userid int,status string,ds string);
+create table profiles(userid int,school string,gender int);
+create table school_summary(school string,cnt int) partitioned by (ds string);
+create table gender_summary(gender int, cnt int) partitioned by (ds string);
+
+insert into status_updates values (1, "status_1", "2009-03-20");
+insert into profiles values (1, "school_1", 0);
+
+FROM (SELECT a.status, b.school, b.gender
+FROM status_updates a JOIN profiles b
+ON (a.userid = b.userid and
+a.ds='2009-03-20' )
+) subq1
+INSERT OVERWRITE TABLE gender_summary
+PARTITION(ds='2009-03-20')
+SELECT subq1.gender, COUNT(1) GROUP BY subq1.gender
+INSERT OVERWRITE TABLE school_summary
+PARTITION(ds='2009-03-20')
+SELECT subq1.school, COUNT(1) GROUP BY subq1.school;
+
+select * from school_summary;
+select * from gender_summary;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_with_join.q.out b/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
new file mode 100644
index 000..28bce84
--- /dev/null
+++ b/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
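The core of the fix is the new BytesWritable wrapper in SparkReduceRecordHandler: Spark hands the same key instance to every consumer, so calling setSize() directly on it to drop the tag byte would corrupt the key seen by the other branches of the multi-insert. A minimal sketch of the pattern against hadoop-common (byte values are illustrative); note that the two-argument constructor shares the underlying buffer but gives setSize() its own object to mutate:

    import org.apache.hadoop.io.BytesWritable;

    // Sketch of stripping a trailing tag byte without mutating a framework-owned,
    // re-used BytesWritable. Byte values are illustrative.
    public class TagStripSketch {
      public static void main(String[] args) {
        BytesWritable shared = new BytesWritable(new byte[]{10, 20, 30, 7}); // key bytes + tag 7

        int size = shared.getSize() - 1;
        byte tag = shared.get()[size];                  // tag rides as the last byte
        // New wrapper over the same buffer: setSize() below leaves 'shared' untouched.
        BytesWritable key = new BytesWritable(shared.getBytes(), size);
        key.setSize(size);

        // tag=7 keyLen=3 sharedLen=4 -- the shared instance still carries all four bytes
        System.out.println("tag=" + tag + " keyLen=" + key.getLength()
            + " sharedLen=" + shared.getLength());
      }
    }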
hive git commit: HIVE-13295: Improvement to LDAP search queries in HS2 LDAP Authenticator (Naveen Gangam via Chaoyu Tang)
Repository: hive
Updated Branches:
  refs/heads/master 55383d815 -> e665f020b


HIVE-13295: Improvement to LDAP search queries in HS2 LDAP Authenticator (Naveen Gangam via Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e665f020
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e665f020
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e665f020

Branch: refs/heads/master
Commit: e665f020b419cf9096006c45f4afcda13fa9e882
Parents: 55383d8
Author: ctang
Authored: Thu Mar 24 09:34:59 2016 -0700
Committer: ctang
Committed: Thu Mar 24 09:34:59 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java | 9 +
 .../auth/LdapAuthenticationProviderImpl.java | 317 ++-
 .../auth/TestLdapAtnProviderWithMiniDS.java | 200 +++-
 3 files changed, 373 insertions(+), 153 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e665f020/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b8b9dcf..b8870f2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2237,6 +2237,15 @@ public class HiveConf extends Configuration {
     HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null,
         "COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" +
         "For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
+    HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid",
+        "LDAP attribute name whose values are unique in this LDAP server.\n" +
+        "For example: uid or CN."),
+    HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member",
+        "LDAP attribute name on the user entry that references a group, the user belongs to.\n" +
+        "For example: member, uniqueMember or memberUid"),
+    HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames",
+        "LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" +
+        "For example: group, groupOfNames or groupOfUniqueNames."),
     HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null,
         "A full LDAP query that LDAP Atn provider uses to execute against LDAP Server.\n" +
         "If this query returns a null resultset, the LDAP Provider fails the Authentication\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/e665f020/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java b/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
index 9b0b14d..8f64672 100644
--- a/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
+++ b/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
 public class LdapAuthenticationProviderImpl implements PasswdAuthenticationProvider {

   private static final Logger LOG = LoggerFactory.getLogger(LdapAuthenticationProviderImpl.class);
-  private static final String DN_ATTR = "distinguishedName";

   private String ldapURL;
   private String baseDN;
@@ -51,6 +50,9 @@ public class LdapAuthenticationProviderImpl implements PasswdAuthenticationProvi
   private static List<String> userFilter;
   private static List<String> groupFilter;
   private String customQuery;
+  private static String guid_attr;
+  private static String groupMembership_attr;
+  private static String groupClass_attr;

   LdapAuthenticationProviderImpl(HiveConf conf) {
     init(conf);
@@ -61,65 +63,66 @@ public class LdapAuthenticationProviderImpl implements PasswdAuthenticationProvi
     baseDN = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN);
     ldapDomain = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_DOMAIN);
     customQuery = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY);
-
-    if (customQuery == null) {
-      groupBases = new ArrayList<String>();
-      userBases = new ArrayList<String>();
-      String groupDNPatterns = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN);
-      String groupFilterVal = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER);
-      String userDNPatterns =
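The three new keys default to an OpenLDAP-style schema (uid, member, groupOfNames), so only non-default directories need to set them. A hedged sketch of overriding them programmatically through the ConfVars added above; the attribute names used here are typical Active Directory choices, shown as an example rather than recommended values:

    import org.apache.hadoop.hive.conf.HiveConf;

    // Example override of the three new LDAP knobs for an Active Directory-style
    // schema. Attribute values are illustrative, not Hive defaults.
    public class LdapAttrConfigSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY, "sAMAccountName");
        conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY, "member");
        conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY, "group");

        // The authenticator reads these via conf.getVar(...) during init().
        System.out.println(conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY));
      }
    }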
hive git commit: HIVE-13217 : Replication for HoS mapjoin small file needs to respect dfs.replication.max (Chinna Rao L, via Szehon Ho)
Repository: hive
Updated Branches:
  refs/heads/master d8705a12f -> 55383d815


HIVE-13217 : Replication for HoS mapjoin small file needs to respect dfs.replication.max (Chinna Rao L, via Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/55383d81
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/55383d81
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/55383d81

Branch: refs/heads/master
Commit: 55383d815420b9d1e8ad364dd2efa8ac894abcb5
Parents: d8705a1
Author: Chinna Rao L
Authored: Thu Mar 24 14:03:16 2016 +0530
Committer: Chinna Rao L
Committed: Thu Mar 24 14:03:16 2016 +0530

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/SparkHashTableSinkOperator.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/55383d81/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
index 85344fc..5837614 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
@@ -47,11 +47,12 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 public class SparkHashTableSinkOperator
     extends TerminalOperator<SparkHashTableSinkDesc> implements Serializable {
-  private static final int MIN_REPLICATION = 10;
   private static final long serialVersionUID = 1L;
   private final String CLASS_NAME = this.getClass().getName();
   private final PerfLogger perfLogger = SessionState.getPerfLogger();
   protected static final Logger LOG = LoggerFactory.getLogger(SparkHashTableSinkOperator.class.getName());
+  public static final String DFS_REPLICATION_MAX = "dfs.replication.max";
+  private int minReplication = 10;

   private final HashTableSinkOperator htsOperator;
@@ -73,6 +74,9 @@ public class SparkHashTableSinkOperator
     byte tag = conf.getTag();
     inputOIs[tag] = inputObjInspectors[0];
     conf.setTagOrder(new Byte[]{ tag });
+    int dfsMaxReplication = hconf.getInt(DFS_REPLICATION_MAX, minReplication);
+    // minReplication value should not cross the value of dfs.replication.max
+    minReplication = Math.min(minReplication, dfsMaxReplication);
     htsOperator.setConf(conf);
     htsOperator.initialize(hconf, inputOIs);
   }
@@ -151,7 +155,7 @@ public class SparkHashTableSinkOperator
     }
     // TODO find out numOfPartitions for the big table
     int numOfPartitions = replication;
-    replication = (short) Math.max(MIN_REPLICATION, numOfPartitions);
+    replication = (short) Math.max(minReplication, numOfPartitions);
     htsOperator.console.printInfo(Utilities.now() + "\tDump the side-table for tag: " + tag
       + " with group count: " + tableContainer.size() + " into file: " + path);
     // get the hashtable file and path
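The change turns the hard-coded floor of 10 into one that is itself capped by dfs.replication.max, so the requested replication can never exceed what the cluster allows. A standalone sketch of the two-step clamp with a plain Hadoop Configuration and illustrative numbers:

    import org.apache.hadoop.conf.Configuration;

    // Two-step clamp sketch: cap the preferred minimum by dfs.replication.max,
    // then raise the actual replication to that capped minimum. Numbers are illustrative.
    public class ReplicationClampSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.setInt("dfs.replication.max", 3);   // cluster-wide upper bound on replicas

        int minReplication = 10;                 // preferred fan-out for the small table
        minReplication = Math.min(minReplication,
            conf.getInt("dfs.replication.max", minReplication));

        short replication = 1;                   // e.g. default replication of the file
        replication = (short) Math.max(minReplication, replication);

        System.out.println(replication);         // 3: respects dfs.replication.max, not 10
      }
    }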