[ 
https://issues.apache.org/jira/browse/DRILL-6385?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16529171#comment-16529171
 ] 

ASF GitHub Bot commented on DRILL-6385:
---------------------------------------

amansinha100 commented on a change in pull request #1334: DRILL-6385: Support 
JPPD feature
URL: https://github.com/apache/drill/pull/1334#discussion_r199336934
 
 

 ##########
 File path: 
exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/RuntimeFilterManager.java
 ##########
 @@ -0,0 +1,735 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.work.filter;
+
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.logical.data.JoinCondition;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.ops.AccountingDataTunnel;
+import org.apache.drill.exec.ops.Consumer;
+import org.apache.drill.exec.ops.QueryContext;
+import org.apache.drill.exec.ops.SendingAccountor;
+import org.apache.drill.exec.ops.StatusHandler;
+import org.apache.drill.exec.physical.PhysicalPlan;
+
+import org.apache.drill.exec.physical.base.AbstractPhysicalVisitor;
+import org.apache.drill.exec.physical.base.Exchange;
+import org.apache.drill.exec.physical.base.GroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.Store;
+import org.apache.drill.exec.physical.config.BroadcastExchange;
+import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.config.HashJoinPOP;
+import org.apache.drill.exec.physical.config.StreamingAggregate;
+import org.apache.drill.exec.planner.fragment.Fragment;
+import org.apache.drill.exec.planner.fragment.Wrapper;
+import org.apache.drill.exec.proto.BitData;
+import org.apache.drill.exec.proto.CoordinationProtos;
+import org.apache.drill.exec.proto.GeneralRPCProtos;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.RpcOutcomeListener;
+import org.apache.drill.exec.rpc.data.DataTunnel;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.store.parquet.ParquetRGFilterEvaluator;
+import org.apache.drill.exec.util.Pointer;
+import org.apache.drill.exec.work.QueryWorkUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
/**
 * This class traverses the physical operator tree to find the HashJoin operator
 * for which JPPD (join predicate push down) is possible. The prerequisites to do JPPD
 * are:
 * 1. The join condition is equality
 * 2. The physical join node is a HashJoin one
 * 3. The probe side children of the HashJoin node should not contain a blocking operator like HashAgg
 */
public class RuntimeFilterManager {

  // Root wrapper of the parallelized plan; entry point used to traverse the
  // whole global PhysicalOperator tree (see the constructor javadoc).
  private Wrapper rootWrapper;

  // Maps a HashJoin's major fragment id to the drillbit endpoints of its probe
  // side scan fragments (the consumers of the aggregated runtime filter).
  // NOTE(review): "probde" looks like a typo for "probe"; kept as-is because the
  // field may be referenced in parts of this 735-line file not shown here.
  private Map<Integer, List<CoordinationProtos.DrillbitEndpoint>> joinMjId2probdeScanEps = new HashMap<>();

  // Number of partial runtime filters still expected per HashJoin major fragment
  // id; decremented as filters arrive (see registerRuntimeFilter).
  private Map<Integer, Integer> joinMjId2scanSize = new ConcurrentHashMap<>();

  // Maps a HashJoin's major fragment id to the major fragment id of its probe side scan.
  private Map<Integer, Integer> joinMjId2ScanMjId = new HashMap<>();

  // Global filter built by aggregating all received partial runtime filters.
  private RuntimeFilterWritable aggregatedRuntimeFilter;

  private DrillbitContext drillbitContext;

  private QueryContext queryContext;

  // Tracks outstanding asynchronous sends so waitForComplete() can block until they finish.
  private SendingAccountor sendingAccountor = new SendingAccountor();

  // Platform line separator, captured once at construction time.
  private String lineSeparator;



  private static final Logger logger = LoggerFactory.getLogger(RuntimeFilterManager.class);
+
+  /**
+   * Here we leverage the root Wrapper to do the traverse which indirectly
+   * holds the whole global PhysicalOperator tree but also contains the 
endpoints
+   * of all the MinorFragments.
+   *
+   * @param workUnit
+   * @param queryContext
+   */
+  public RuntimeFilterManager(QueryWorkUnit workUnit, QueryContext 
queryContext, DrillbitContext drillbitContext) {
+    this.rootWrapper = workUnit.getRootWrapper();
+    this.queryContext = queryContext;
+    this.drillbitContext = drillbitContext;
+    lineSeparator = java.security.AccessController.doPrivileged(new 
sun.security.action.GetPropertyAction("line.separator"));
+  }
+
+  /**
+   * Apply runtime filter to the physical plan if possible
+   * @param plan
+   * @param queryContext
+   */
+  public static void applyRuntimeFilter(PhysicalPlan plan, QueryContext 
queryContext) {
+    //TODO except the default configured bloom filter bytes size, we should 
also calculate it by the NDV (number of distinct value)
+    int bloomFilterSizeInBytes = 
queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;
+    boolean enableRuntimeFilter = 
queryContext.getOption(ExecConstants.HASHJOIN_ENABLE_RUNTIME_FILTER_KEY).bool_val;
+    if (!enableRuntimeFilter) {
+      return;
+    }
+    final PhysicalOperator rootOperator = 
plan.getSortedOperators(false).iterator().next();
+    CandidateHashJoinOpIdentifier candidateHashJoinOpIdentifier = new 
CandidateHashJoinOpIdentifier();
+    rootOperator.accept(candidateHashJoinOpIdentifier, null);
+    List<HashJoinOpHolder> qualifiedHolders = 
candidateHashJoinOpIdentifier.qualifiedHolders();
+    //set the RuntimeFilterDef to its corresponding HashJoin node from top to 
down.
+    for (HashJoinOpHolder holder : qualifiedHolders) {
+      boolean sendToSelf = false;
+      if (holder.isBroadcastHashJoin) {
+        // send RuntimeFilter to the join node itself
+        sendToSelf = true;
+      }
+      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, 
bloomFilterSizeInBytes, sendToSelf);
+      RuntimeFilterDef runtimeFilterDef = new RuntimeFilterDef(true, false, 
bloomFilterDefs, !sendToSelf);
+      holder.getHashJoinPOP().setRuntimeFilterDef(runtimeFilterDef);
+    }
+  }
+
+
+  /**
+   * This method is to find the possible HashJoin physical nodes from the 
PhysicalOperator tree to
+   * applyRuntimeFilter RuntimeFilter. Then it constructs a 
RuntimeFilterRouting to record the relationship between
+   * the RuntimeFilter producers and consumers.
+   */
+  public void collectRuntimeFilterControlInfo(Pointer<String> textPlan) {
+    Map<String, String> mjOpIdPair2runtimeFilter = new HashMap<>();
+    RuntimeFilterParallelismCollector runtimeFilterParallelismCollector = new 
RuntimeFilterParallelismCollector();
+    rootWrapper.getNode().getRoot().accept(runtimeFilterParallelismCollector, 
null);
+    List<HashJoinOpHolder> holders = 
runtimeFilterParallelismCollector.getHolders();
+    int bloomFilterSizeInBytes = 
queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;
+
+    for (HashJoinOpHolder holder : holders) {
+      List<CoordinationProtos.DrillbitEndpoint> probeSideEndpoints = 
holder.getProbeSideScanEndpoints();
+      int probeSideScanMajorId = holder.getProbeSideScanMajorId();
+      int joinNodeMajorId = holder.getJoinMajorId();
+      boolean sendToSelf = false;
+      if (holder.isBroadcastHashJoin) {
+        // send RuntimeFilter to the join node itself
+        sendToSelf = true;
+      }
+      //mark the runtime filter info to the profile
+      int probeSideScanOpId = holder.getProbeSideScanOpId();
+      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, 
bloomFilterSizeInBytes, sendToSelf);
+      String mjOpIdPair = String.format("%02d-%02d", probeSideScanMajorId, 
probeSideScanOpId);
+      StringBuilder stringBuilder = new StringBuilder();
+      stringBuilder.append("RuntimeFilter[");
+      for (BloomFilterDef bloomFilterDef : bloomFilterDefs) {
+        stringBuilder.append(bloomFilterDef.toString()).append(",");
+      }
+      stringBuilder.append("]");
+      String runtimeFiltersJson = stringBuilder.toString();
+      mjOpIdPair2runtimeFilter.put(mjOpIdPair, runtimeFiltersJson);
+      if (!sendToSelf) {
+        //send RuntimeFilter to Foreman
+        joinMjId2probdeScanEps.put(joinNodeMajorId, probeSideEndpoints);
+        joinMjId2scanSize.put(joinNodeMajorId, probeSideEndpoints.size());
+        joinMjId2ScanMjId.put(joinNodeMajorId, probeSideScanMajorId);
+      }
+    }
+    reconstructTextPlan(textPlan, mjOpIdPair2runtimeFilter);
+  }
+
+
  /**
   * Blocks until every runtime filter message sent asynchronously through this
   * manager's {@link SendingAccountor} has completed.
   */
  public void waitForComplete() {
    sendingAccountor.waitForSendComplete();
  }
+
+  /**
+   * This method is passively invoked by receiving a runtime filter from the 
network
+   * @param runtimeFilterWritable
+   */
+  public void registerRuntimeFilter(RuntimeFilterWritable 
runtimeFilterWritable) {
+    BitData.RuntimeFilterBDef runtimeFilterB = 
runtimeFilterWritable.getRuntimeFilterBDef();
+    int majorId = runtimeFilterB.getMajorFragmentId();
+    UserBitShared.QueryId queryId = runtimeFilterB.getQueryId();
+    List<String> probeFields = runtimeFilterB.getProbeFieldsList();
+    logger.info("RuntimeFilterManager receives a runtime filter , majorId:{}, 
queryId:{}", majorId, QueryIdHelper.getQueryId(queryId));
+    int size;
+    synchronized (this) {
+      size = joinMjId2scanSize.get(majorId);
+      if (aggregatedRuntimeFilter == null) {
+        aggregatedRuntimeFilter = runtimeFilterWritable;
+      } else {
+        this.aggregatedRuntimeFilter.aggregate(runtimeFilterWritable);
+      }
+      size--;
+      joinMjId2scanSize.put(majorId, size);
+    }
+    if (size == 0) {
+      broadcastAggregatedRuntimeFilter(majorId, queryId, probeFields);
+    }
+  }
+
+
+  private static List<BloomFilterDef> 
constructBloomFilterDefs(HashJoinOpHolder holder, int bloomFilterSizeInBytes, 
boolean sendToSelf) {
+    List<JoinCondition> joinConditions = holder.hashJoinPOP.getConditions();
+    List<BloomFilterDef> bloomFilterDefs = new ArrayList<>();
+    for (JoinCondition joinCondition : joinConditions) {
+      LogicalExpression leftProExp = joinCondition.getLeft();
+      ParquetRGFilterEvaluator.FieldReferenceFinder fieldReferenceFinder = new 
ParquetRGFilterEvaluator.FieldReferenceFinder();
 
 Review comment:
   Is the fieldReferenceFinder only needed for ParquetRGFilterEvaluator ?  Or 
(most likely) you are just using it as a utility finder ?  It is a bit 
confusing to see reference to Parquet here. 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> Support JPPD (Join Predicate Push Down)
> ---------------------------------------
>
>                 Key: DRILL-6385
>                 URL: https://issues.apache.org/jira/browse/DRILL-6385
>             Project: Apache Drill
>          Issue Type: New Feature
>          Components:  Server, Execution - Flow
>    Affects Versions: 1.14.0
>            Reporter: weijie.tong
>            Assignee: weijie.tong
>            Priority: Major
>
> This feature is to support JPPD (Join Predicate Push Down). It will 
> benefit HashJoin and Broadcast HashJoin performance by reducing the number 
> of rows sent across the network and the memory consumed. This feature is 
> already supported by Impala, which calls it runtime filtering 
> ([https://www.cloudera.com/documentation/enterprise/5-9-x/topics/impala_runtime_filtering.html]).
>  The first PR will try to push down a bloom filter of HashJoin node to 
> Parquet’s scan node.   The propose basic procedure is described as follow:
>  # The HashJoin build side accumulates the equality join condition rows to 
> construct a bloom filter. Then it sends the bloom filter to the foreman 
> node.
>  # The foreman node passively accepts the bloom filters from all the fragments 
> that have the HashJoin operator. It then aggregates the bloom filters to form 
> a global bloom filter.
>  # The foreman node broadcasts the global bloom filter to all the probe side 
> scan nodes, which may already have sent out partial data to the hash join 
> nodes (currently the hash join node prefetches one batch from both sides).
>       4.  The scan node accepts a global bloom filter from the foreman node. 
> It will filter the rest rows satisfying the bloom filter.
>  
> To implement above execution flow, some main new notion described as below:
>       1. RuntimeFilter
> It’s a filter container which may contain BloomFilter or MinMaxFilter.
>       2. RuntimeFilterReporter
> It wraps the logic to send the hash join’s bloom filter to the foreman. The 
> serialized bloom filter will be sent out through the data tunnel. This object 
> will be instantiated by the FragmentExecutor and passed to the 
> FragmentContext, so the HashJoin operator can obtain it through the 
> FragmentContext.
>      3. RuntimeFilterRequestHandler
> It is responsible for accepting a SendRuntimeFilterRequest RPC and stripping the 
> actual BloomFilter from the network. It then passes this filter to the 
> WorkerBee’s new interface registerRuntimeFilter.
> Another RPC type is BroadcastRuntimeFilterRequest. It will register the 
> accepted global bloom filter to the WorkerBee by the registerRuntimeFilter 
> method and then propagate to the FragmentContext through which the probe side 
> scan node can fetch the aggregated bloom filter.
>       4.RuntimeFilterManager
> The foreman will instantiate a RuntimeFilterManager. It will indirectly get 
> every RuntimeFilter via the WorkerBee. Once all the BloomFilters have been 
> accepted and aggregated, it will broadcast the aggregated bloom filter to 
> all the probe side scan nodes through the data tunnel by a 
> BroadcastRuntimeFilterRequest RPC.
>      5. RuntimeFilterEnableOption 
>  A global option will be added to decide whether to enable this new feature.
>  
> Welcome suggestion and advice from you.The related PR will be presented as 
> soon as possible.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

Reply via email to