[ https://issues.apache.org/jira/browse/HIVE-23716?focusedWorklogId=463349&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-463349 ]
ASF GitHub Bot logged work on HIVE-23716: ----------------------------------------- Author: ASF GitHub Bot Created on: 26/Jul/20 12:36 Start Date: 26/Jul/20 12:36 Worklog Time Spent: 10m Work Description: maheshk114 commented on a change in pull request #1147: URL: https://github.com/apache/hive/pull/1147#discussion_r460522312 ########## File path: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinAntiJoinLongOperator.java ########## @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import org.apache.hadoop.hive.ql.CompilationOpContext; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.VectorDesc; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; + +// TODO : Duplicate codes need to merge with semi join. +// Single-Column Long hash table import. +// Single-Column Long specific imports. + +/* + * Specialized class for doing a vectorized map join that is an anti join on a Single-Column Long + * using a hash set. + */ +public class VectorMapJoinAntiJoinLongOperator extends VectorMapJoinAntiJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final String CLASS_NAME = VectorMapJoinAntiJoinLongOperator.class.getName(); + private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + protected String getLoggingPrefix() { + return super.getLoggingPrefix(CLASS_NAME); + } + + // The above members are initialized by the constructor and must not be + // transient. + + // The hash map for this specialized class. + private transient VectorMapJoinLongHashSet hashSet; + + // Single-Column Long specific members. + // For integers, we have optional min/max filtering. + private transient boolean useMinMax; + private transient long min; + private transient long max; + + // The column number for this one column join specialization. + private transient int singleJoinColumn; + + // Pass-thru constructors. 
+ /** Kryo ctor. */ + protected VectorMapJoinAntiJoinLongOperator() { + super(); + } + + public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx) { + super(ctx); + } + + public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx, OperatorDesc conf, + VectorizationContext vContext, VectorDesc vectorDesc) throws HiveException { + super(ctx, conf, vContext, vectorDesc); + } + + // Process Single-Column Long Anti Join on a vectorized row batch. + @Override + protected void commonSetup() throws HiveException { + super.commonSetup(); + + // Initialize Single-Column Long members for this specialized class. + singleJoinColumn = bigTableKeyColumnMap[0]; + } + + @Override + public void hashTableSetup() throws HiveException { + super.hashTableSetup(); + + // Get our Single-Column Long hash set information for this specialized class. + hashSet = (VectorMapJoinLongHashSet) vectorMapJoinHashTable; + useMinMax = hashSet.useMinMax(); + if (useMinMax) { + min = hashSet.min(); + max = hashSet.max(); + } + } + + @Override + public void processBatch(VectorizedRowBatch batch) throws HiveException { + + try { + // (Currently none) + // antiPerBatchSetup(batch); + + // For anti joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + if (inputLogicalSize == 0) { + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // The one join column for this specialized class. + LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn]; + long[] vector = joinColVector.vector; + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + // All key input columns are repeating. Generate key once. 
Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + JoinUtil.JoinResult joinResult; + if (!joinColVector.noNulls && joinColVector.isNull[0]) { + // For anti join, if the right side is null then its a match. + joinResult = JoinUtil.JoinResult.MATCH; + } else { + long key = vector[0]; + if (useMinMax && (key < min || key > max)) { + // Out of range for whole batch. Its a match for anti join. We can emit the row. + joinResult = JoinUtil.JoinResult.MATCH; + } else { + joinResult = hashSet.contains(key, hashSetResults[0]); + // reverse the join result for anti join. + if (joinResult == JoinUtil.JoinResult.NOMATCH) { + joinResult = JoinUtil.JoinResult.MATCH; + } else if (joinResult == JoinUtil.JoinResult.MATCH) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } + } + } + + // Common repeated join result processing. + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + finishAntiRepeated(batch, joinResult, hashSetResults[0]); + } else { + // NOT Repeating. + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matches / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashSetResultCount = 0; + int allMatchCount = 0; + int spillCount = 0; + long saveKey = 0; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? 
selected[logical] : logical); + + // Single-Column Long get key. + long currentKey; + boolean isNull; + if (!joinColVector.noNulls && joinColVector.isNull[batchIndex]) { + currentKey = 0; + isNull = true; + } else { + currentKey = vector[batchIndex]; + isNull = false; + } + + // Equal key series checking. + if (isNull || !haveSaveKey || currentKey != saveKey) { + // New key. + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (isNull) { + saveJoinResult = JoinUtil.JoinResult.MATCH; + haveSaveKey = false; + } else { + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + saveKey = currentKey; + if (useMinMax && (currentKey < min || currentKey > max)) { + // Key out of range for whole hash table, is a valid match for anti join. + saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } else { + saveJoinResult = hashSet.contains(currentKey, hashSetResults[hashSetResultCount]); + } + + // Reverse the match result for anti join. + if (saveJoinResult == JoinUtil.JoinResult.NOMATCH) { + saveJoinResult = JoinUtil.JoinResult.MATCH; + } else if (saveJoinResult == JoinUtil.JoinResult.MATCH) { + saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } + } + + // Common anti join result processing. 
+ switch (saveJoinResult) { + case MATCH: + allMatchs[allMatchCount++] = batchIndex; + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " MATCH isSingleValue " + equalKeySeriesIsSingleValue[equalKeySeriesCount] + " currentKey " + currentKey); + break; + + case SPILL: + spills[spillCount] = batchIndex; + spillHashMapResultIndices[spillCount] = hashSetResultCount; + spillCount++; + break; + + case NOMATCH: + // VectorizedBatchUtil.debugDisplayOneRow(batch, batchIndex, CLASS_NAME + " NOMATCH" + " currentKey " + currentKey); + break; + } + } else { + // Series of equal keys. Review comment: done ########## File path: ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinAntiJoinLongOperator.java ########## @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.mapjoin; + +import org.apache.hadoop.hive.ql.CompilationOpContext; +import org.apache.hadoop.hive.ql.exec.JoinUtil; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.mapjoin.hashtable.VectorMapJoinLongHashSet; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.VectorDesc; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; + +// TODO : Duplicate codes need to merge with semi join. +// Single-Column Long hash table import. +// Single-Column Long specific imports. + +/* + * Specialized class for doing a vectorized map join that is an anti join on a Single-Column Long + * using a hash set. + */ +public class VectorMapJoinAntiJoinLongOperator extends VectorMapJoinAntiJoinGenerateResultOperator { + + private static final long serialVersionUID = 1L; + private static final String CLASS_NAME = VectorMapJoinAntiJoinLongOperator.class.getName(); + private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + protected String getLoggingPrefix() { + return super.getLoggingPrefix(CLASS_NAME); + } + + // The above members are initialized by the constructor and must not be + // transient. + + // The hash map for this specialized class. + private transient VectorMapJoinLongHashSet hashSet; + + // Single-Column Long specific members. + // For integers, we have optional min/max filtering. + private transient boolean useMinMax; + private transient long min; + private transient long max; + + // The column number for this one column join specialization. + private transient int singleJoinColumn; + + // Pass-thru constructors. 
+ /** Kryo ctor. */ + protected VectorMapJoinAntiJoinLongOperator() { + super(); + } + + public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx) { + super(ctx); + } + + public VectorMapJoinAntiJoinLongOperator(CompilationOpContext ctx, OperatorDesc conf, + VectorizationContext vContext, VectorDesc vectorDesc) throws HiveException { + super(ctx, conf, vContext, vectorDesc); + } + + // Process Single-Column Long Anti Join on a vectorized row batch. + @Override + protected void commonSetup() throws HiveException { + super.commonSetup(); + + // Initialize Single-Column Long members for this specialized class. + singleJoinColumn = bigTableKeyColumnMap[0]; + } + + @Override + public void hashTableSetup() throws HiveException { + super.hashTableSetup(); + + // Get our Single-Column Long hash set information for this specialized class. + hashSet = (VectorMapJoinLongHashSet) vectorMapJoinHashTable; + useMinMax = hashSet.useMinMax(); + if (useMinMax) { + min = hashSet.min(); + max = hashSet.max(); + } + } + + @Override + public void processBatch(VectorizedRowBatch batch) throws HiveException { + + try { + // (Currently none) + // antiPerBatchSetup(batch); + + // For anti joins, we may apply the filter(s) now. + for(VectorExpression ve : bigTableFilterExpressions) { + ve.evaluate(batch); + } + + final int inputLogicalSize = batch.size; + if (inputLogicalSize == 0) { + return; + } + + // Perform any key expressions. Results will go into scratch columns. + if (bigTableKeyExpressions != null) { + for (VectorExpression ve : bigTableKeyExpressions) { + ve.evaluate(batch); + } + } + + // The one join column for this specialized class. + LongColumnVector joinColVector = (LongColumnVector) batch.cols[singleJoinColumn]; + long[] vector = joinColVector.vector; + + // Check single column for repeating. + boolean allKeyInputColumnsRepeating = joinColVector.isRepeating; + + if (allKeyInputColumnsRepeating) { + // All key input columns are repeating. Generate key once. 
Lookup once. + // Since the key is repeated, we must use entry 0 regardless of selectedInUse. + JoinUtil.JoinResult joinResult; + if (!joinColVector.noNulls && joinColVector.isNull[0]) { + // For anti join, if the right side is null then its a match. + joinResult = JoinUtil.JoinResult.MATCH; + } else { + long key = vector[0]; + if (useMinMax && (key < min || key > max)) { + // Out of range for whole batch. Its a match for anti join. We can emit the row. + joinResult = JoinUtil.JoinResult.MATCH; + } else { + joinResult = hashSet.contains(key, hashSetResults[0]); + // reverse the join result for anti join. + if (joinResult == JoinUtil.JoinResult.NOMATCH) { + joinResult = JoinUtil.JoinResult.MATCH; + } else if (joinResult == JoinUtil.JoinResult.MATCH) { + joinResult = JoinUtil.JoinResult.NOMATCH; + } + } + } + + // Common repeated join result processing. + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); + } + finishAntiRepeated(batch, joinResult, hashSetResults[0]); + } else { + // NOT Repeating. + + if (LOG.isDebugEnabled()) { + LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); + } + + // We remember any matching rows in matches / matchSize. At the end of the loop, + // selected / batch.size will represent both matching and non-matching rows for outer join. + // Only deferred rows will have been removed from selected. + int selected[] = batch.selected; + boolean selectedInUse = batch.selectedInUse; + + int hashSetResultCount = 0; + int allMatchCount = 0; + int spillCount = 0; + long saveKey = 0; + + // We optimize performance by only looking up the first key in a series of equal keys. + boolean haveSaveKey = false; + JoinUtil.JoinResult saveJoinResult = JoinUtil.JoinResult.NOMATCH; + + // Logical loop over the rows in the batch since the batch may have selected in use. + for (int logical = 0; logical < inputLogicalSize; logical++) { + int batchIndex = (selectedInUse ? 
selected[logical] : logical); + + // Single-Column Long get key. + long currentKey; + boolean isNull; + if (!joinColVector.noNulls && joinColVector.isNull[batchIndex]) { + currentKey = 0; + isNull = true; + } else { + currentKey = vector[batchIndex]; + isNull = false; + } + + // Equal key series checking. + if (isNull || !haveSaveKey || currentKey != saveKey) { + // New key. + if (haveSaveKey) { + // Move on with our counts. + switch (saveJoinResult) { + case MATCH: + // We have extracted the existence from the hash set result, so we don't keep it. + break; + case SPILL: + // We keep the hash set result for its spill information. + hashSetResultCount++; + break; + case NOMATCH: + break; + } + } + + if (isNull) { + saveJoinResult = JoinUtil.JoinResult.MATCH; + haveSaveKey = false; + } else { + // Regardless of our matching result, we keep that information to make multiple use + // of it for a possible series of equal keys. + haveSaveKey = true; + saveKey = currentKey; + if (useMinMax && (currentKey < min || currentKey > max)) { + // Key out of range for whole hash table, is a valid match for anti join. + saveJoinResult = JoinUtil.JoinResult.NOMATCH; + } else { + saveJoinResult = hashSet.contains(currentKey, hashSetResults[hashSetResultCount]); + } + + // Reverse the match result for anti join. Review comment: done ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: us...@infra.apache.org Issue Time Tracking ------------------- Worklog Id: (was: 463349) Time Spent: 12h 40m (was: 12.5h) > Support Anti Join in Hive > -------------------------- > > Key: HIVE-23716 > URL: https://issues.apache.org/jira/browse/HIVE-23716 > Project: Hive > Issue Type: Bug > Reporter: mahesh kumar behera > Assignee: mahesh kumar behera > Priority: Major > Labels: pull-request-available > Attachments: HIVE-23716.01.patch > > Time Spent: 12h 40m > Remaining Estimate: 0h > > Currently hive does not support Anti join. The query for anti join is > converted to left outer join and null filter on right side join key is added > to get the desired result. This is causing > # Extra computation — The left outer join projects the redundant columns > from right side. Along with that, filtering is done to remove the redundant > rows. This can be avoided in case of anti join as anti join will project > only the required columns and rows from the left side table. > # Extra shuffle — In case of anti join the duplicate records moved to join > node can be avoided from the child node. This can reduce a significant amount > of data movement if the number of distinct rows (join keys) is significant. > # Extra Memory Usage - In case of map based anti join, a hash set is > sufficient as just the key is required to check if the records match the > join condition. In case of left join, we need the key and the non key columns > also and thus a hash table will be required. > For a query like > {code:java} > select wr_order_number FROM web_returns LEFT JOIN web_sales ON > wr_order_number = ws_order_number WHERE ws_order_number IS NULL;{code} > The number of distinct ws_order_number in web_sales table in a typical 10TB > TPCDS set up is just 10% of total records. So when we convert this query to > anti join, instead of 7 billion rows, only 600 million rows are moved to join > node. 
> In the current patch, just one conversion is done. The pattern of > project->filter->left-join is converted to project->anti-join. This will take > care of subqueries with the “not exists” clause. The queries with “not exists” > are converted first to filter + left-join and then it's converted to anti > join. The queries with “not in” are not handled in the current patch. > From the execution side, both merge join and map join with vectorized execution > are supported for anti join. -- This message was sent by Atlassian Jira (v8.3.4#803005)