amansinha100 commented on a change in pull request #1334: DRILL-6385: Support JPPD feature
URL: https://github.com/apache/drill/pull/1334#discussion_r200454496
 
 

 ##########
 File path: exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/RuntimeFilterManager.java
 ##########
 @@ -0,0 +1,755 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.work.filter;
+
+import com.google.common.collect.Sets;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.expression.visitors.AbstractExprVisitor;
+import org.apache.drill.common.logical.data.JoinCondition;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.ops.AccountingDataTunnel;
+import org.apache.drill.exec.ops.Consumer;
+import org.apache.drill.exec.ops.QueryContext;
+import org.apache.drill.exec.ops.SendingAccountor;
+import org.apache.drill.exec.ops.StatusHandler;
+import org.apache.drill.exec.physical.PhysicalPlan;
+
+import org.apache.drill.exec.physical.base.AbstractPhysicalVisitor;
+import org.apache.drill.exec.physical.base.Exchange;
+import org.apache.drill.exec.physical.base.GroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.Store;
+import org.apache.drill.exec.physical.config.BroadcastExchange;
+import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.config.HashJoinPOP;
+import org.apache.drill.exec.physical.config.StreamingAggregate;
+import org.apache.drill.exec.planner.fragment.Fragment;
+import org.apache.drill.exec.planner.fragment.Wrapper;
+import org.apache.drill.exec.proto.BitData;
+import org.apache.drill.exec.proto.CoordinationProtos;
+import org.apache.drill.exec.proto.GeneralRPCProtos;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.RpcOutcomeListener;
+import org.apache.drill.exec.rpc.data.DataTunnel;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.util.Pointer;
+import org.apache.drill.exec.work.QueryWorkUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class traverses the physical operator tree to find the HashJoin operators
+ * for which JPPD (join predicate push down) is possible. The prerequisites for JPPD are:
+ * 1. The join condition is an equality
+ * 2. The physical join node is a HashJoin
+ * 3. The probe side children of the HashJoin node do not contain a blocking operator such as HashAgg
+ */
+public class RuntimeFilterManager {
+
+  private Wrapper rootWrapper;
+  //HashJoin node's major fragment id to its corresponding probe side scan nodes' endpoints
+  private Map<Integer, List<CoordinationProtos.DrillbitEndpoint>> joinMjId2probdeScanEps = new HashMap<>();
+  //HashJoin node's major fragment id to the number of its corresponding probe side scan nodes
+  private Map<Integer, Integer> joinMjId2scanSize = new ConcurrentHashMap<>();
+  //HashJoin node's major fragment id to the major fragment id that its corresponding probe side scan node belongs to
+  private Map<Integer, Integer> joinMjId2ScanMjId = new HashMap<>();
+
+  private RuntimeFilterWritable aggregatedRuntimeFilter;
+
+  private DrillbitContext drillbitContext;
+
+  private QueryContext queryContext;
+
+  private SendingAccountor sendingAccountor = new SendingAccountor();
+
+  private String lineSeparator;
+
+
+
+  private static final Logger logger = LoggerFactory.getLogger(RuntimeFilterManager.class);
+
+  /**
+   * This class maintains context for the runtime join push down's filter management. It
+   * does a traversal of the physical operators by leveraging the root wrapper which indirectly
+   * holds the global PhysicalOperator tree and contains the minor fragment endpoints.
+   * @param workUnit
+   * @param queryContext
+   * @param drillbitContext
+   */
+  public RuntimeFilterManager(QueryWorkUnit workUnit, QueryContext queryContext, DrillbitContext drillbitContext) {
+    this.rootWrapper = workUnit.getRootWrapper();
+    this.queryContext = queryContext;
+    this.drillbitContext = drillbitContext;
+    lineSeparator = java.security.AccessController.doPrivileged(new sun.security.action.GetPropertyAction("line.separator"));
+  }
+
+  /**
+   * Apply runtime filter to the physical plan if possible
+   * @param plan
+   * @param queryContext
+   */
+  public static void applyRuntimeFilter(PhysicalPlan plan, QueryContext queryContext) {
+    //TODO besides the default configured bloom filter size in bytes, we should also calculate it from the NDV (number of distinct values)
+    int bloomFilterSizeInBytes = queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;
+    boolean enableRuntimeFilter = queryContext.getOption(ExecConstants.HASHJOIN_ENABLE_RUNTIME_FILTER_KEY).bool_val;
+    if (!enableRuntimeFilter) {
+      return;
+    }
+    final PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
+    CandidateHashJoinOpIdentifier candidateHashJoinOpIdentifier = new CandidateHashJoinOpIdentifier();
+    rootOperator.accept(candidateHashJoinOpIdentifier, null);
+    List<HashJoinOpHolder> qualifiedHolders = candidateHashJoinOpIdentifier.qualifiedHolders();
+    //set the RuntimeFilterDef on its corresponding HashJoin node from top to bottom
+    for (HashJoinOpHolder holder : qualifiedHolders) {
+      boolean sendToSelf = false;
+      if (holder.isBroadcastHashJoin) {
+        // send RuntimeFilter to the join node itself
+        sendToSelf = true;
+      }
+      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, bloomFilterSizeInBytes, sendToSelf);
+      RuntimeFilterDef runtimeFilterDef = new RuntimeFilterDef(true, false, bloomFilterDefs, !sendToSelf);
+      holder.getHashJoinPOP().setRuntimeFilterDef(runtimeFilterDef);
+    }
+  }
+
+
+  /**
+   * This method finds the HashJoin physical nodes in the PhysicalOperator tree to which a
+   * RuntimeFilter can be applied. It then constructs a RuntimeFilterRouting to record the
+   * relationship between the RuntimeFilter producers and consumers.
+   */
+  public void collectRuntimeFilterControlInfo(Pointer<String> textPlan) {
+    Map<String, String> mjOpIdPair2runtimeFilter = new HashMap<>();
+    RuntimeFilterParallelismCollector runtimeFilterParallelismCollector = new RuntimeFilterParallelismCollector();
+    rootWrapper.getNode().getRoot().accept(runtimeFilterParallelismCollector, null);
+    List<HashJoinOpHolder> holders = runtimeFilterParallelismCollector.getHolders();
+    int bloomFilterSizeInBytes = queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;
+
+    for (HashJoinOpHolder holder : holders) {
+      List<CoordinationProtos.DrillbitEndpoint> probeSideEndpoints = holder.getProbeSideScanEndpoints();
+      int probeSideScanMajorId = holder.getProbeSideScanMajorId();
+      int joinNodeMajorId = holder.getJoinMajorId();
+      boolean sendToSelf = false;
+      if (holder.isBroadcastHashJoin) {
+        // send RuntimeFilter to the join node itself
+        sendToSelf = true;
+      }
+      //record the runtime filter info in the profile
+      int probeSideScanOpId = holder.getProbeSideScanOpId();
+      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, bloomFilterSizeInBytes, sendToSelf);
+      String mjOpIdPair = String.format("%02d-%02d", probeSideScanMajorId, probeSideScanOpId);
+      StringBuilder stringBuilder = new StringBuilder();
+      stringBuilder.append("RuntimeFilter[");
+      for (BloomFilterDef bloomFilterDef : bloomFilterDefs) {
+        stringBuilder.append(bloomFilterDef.toString()).append(",");
+      }
+      stringBuilder.append("]");
+      String runtimeFiltersJson = stringBuilder.toString();
+      mjOpIdPair2runtimeFilter.put(mjOpIdPair, runtimeFiltersJson);
+      if (!sendToSelf) {
+        //send RuntimeFilter to the Foreman
+        joinMjId2probdeScanEps.put(joinNodeMajorId, probeSideEndpoints);
+        joinMjId2scanSize.put(joinNodeMajorId, probeSideEndpoints.size());
+        joinMjId2ScanMjId.put(joinNodeMajorId, probeSideScanMajorId);
+      }
+    }
+    reconstructTextPlan(textPlan, mjOpIdPair2runtimeFilter);
+  }
+
+
+  public void waitForComplete() {
+    sendingAccountor.waitForSendComplete();
+  }
+
+  /**
+   * This method is invoked upon receiving a runtime filter from the network
+   * @param runtimeFilterWritable
+   */
+  public void registerRuntimeFilter(RuntimeFilterWritable runtimeFilterWritable) {
+    BitData.RuntimeFilterBDef runtimeFilterB = runtimeFilterWritable.getRuntimeFilterBDef();
+    int majorId = runtimeFilterB.getMajorFragmentId();
+    UserBitShared.QueryId queryId = runtimeFilterB.getQueryId();
+    List<String> probeFields = runtimeFilterB.getProbeFieldsList();
+    logger.info("RuntimeFilterManager received a runtime filter, majorId:{}, queryId:{}", majorId, QueryIdHelper.getQueryId(queryId));
+    int size;
+    synchronized (this) {
+      size = joinMjId2scanSize.get(majorId);
+      if (aggregatedRuntimeFilter == null) {
+        aggregatedRuntimeFilter = runtimeFilterWritable;
+      } else {
+        this.aggregatedRuntimeFilter.aggregate(runtimeFilterWritable);
+      }
+      size--;
+      joinMjId2scanSize.put(majorId, size);
+    }
+    if (size == 0) {
+      broadcastAggregatedRuntimeFilter(majorId, queryId, probeFields);
+    }
+  }
+
+
+  private static List<BloomFilterDef> constructBloomFilterDefs(HashJoinOpHolder holder, int bloomFilterSizeInBytes, boolean sendToSelf) {
+    List<JoinCondition> joinConditions = holder.hashJoinPOP.getConditions();
+    List<BloomFilterDef> bloomFilterDefs = new ArrayList<>();
+    for (JoinCondition joinCondition : joinConditions) {
+      LogicalExpression leftProExp = joinCondition.getLeft();
+      FieldReferenceFinder fieldReferenceFinder = new FieldReferenceFinder();
 
 Review comment:
   Suppose the join condition involves an expression such as a CAST function: `WHERE CAST(o_custkey as INT) = CAST(c_custkey as INT)`. In this case I would expect that the Bloom Filter should NOT be applied, since the hash join probe should be done after the cast is performed. It seems to me that the FieldReferenceFinder will get the inputs of the CAST and construct the bloom filter, but I think that will not be correct... any thoughts?
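   One possible guard, as a rough sketch (the helper name `isBareColumnReference` is hypothetical, not from this PR; `AbstractExprVisitor`, `SchemaPath`, and `LogicalExpression` are the Drill classes already imported in this file): build a `BloomFilterDef` only when the probe-side expression is a bare column reference.
   ```java
   // Sketch only: returns true iff the expression is a plain SchemaPath.
   // CAST(...) and any other wrapping expression fall through to
   // visitUnknown and return false, so no bloom filter is built for them.
   private static boolean isBareColumnReference(LogicalExpression expr) {
     return expr.accept(new AbstractExprVisitor<Boolean, Void, RuntimeException>() {
       @Override
       public Boolean visitSchemaPath(SchemaPath path, Void value) {
         return true;
       }

       @Override
       public Boolean visitUnknown(LogicalExpression e, Void value) {
         return false;
       }
     }, null);
   }
   ```
   With such a check, `constructBloomFilterDefs` could skip any join condition whose left expression is not a bare column, instead of handing the CAST's inputs to `FieldReferenceFinder`.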

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
