amansinha100 commented on a change in pull request #1334: DRILL-6385: Support 
JPPD feature
URL: https://github.com/apache/drill/pull/1334#discussion_r200818931
 
 

 ##########
 File path: 
exec/java-exec/src/main/java/org/apache/drill/exec/work/filter/RuntimeFilterManager.java
 ##########
 @@ -0,0 +1,755 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.work.filter;
+
+import com.google.common.collect.Sets;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.expression.visitors.AbstractExprVisitor;
+import org.apache.drill.common.logical.data.JoinCondition;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.ops.AccountingDataTunnel;
+import org.apache.drill.exec.ops.Consumer;
+import org.apache.drill.exec.ops.QueryContext;
+import org.apache.drill.exec.ops.SendingAccountor;
+import org.apache.drill.exec.ops.StatusHandler;
+import org.apache.drill.exec.physical.PhysicalPlan;
+
+import org.apache.drill.exec.physical.base.AbstractPhysicalVisitor;
+import org.apache.drill.exec.physical.base.Exchange;
+import org.apache.drill.exec.physical.base.GroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.Store;
+import org.apache.drill.exec.physical.config.BroadcastExchange;
+import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.config.HashJoinPOP;
+import org.apache.drill.exec.physical.config.StreamingAggregate;
+import org.apache.drill.exec.planner.fragment.Fragment;
+import org.apache.drill.exec.planner.fragment.Wrapper;
+import org.apache.drill.exec.proto.BitData;
+import org.apache.drill.exec.proto.CoordinationProtos;
+import org.apache.drill.exec.proto.GeneralRPCProtos;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.exec.rpc.RpcOutcomeListener;
+import org.apache.drill.exec.rpc.data.DataTunnel;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.util.Pointer;
+import org.apache.drill.exec.work.QueryWorkUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class traverses the physical operator tree to find the HashJoin 
operator
+ * for which is JPPD (join predicate push down) is possible. The prerequisite 
to do JPPD
+ * is:
+ * 1. The join condition is equality
+ * 2. The physical join node is a HashJoin one
+ * 3. The probe side children of the HashJoin node should not contain a 
blocking operator like HashAgg
+ */
+public class RuntimeFilterManager {
+
  // Root of the plan's wrapper tree; used to traverse the whole physical operator tree.
  private Wrapper rootWrapper;
  // HashJoin node's major fragment id -> the probe side scan nodes' drillbit endpoints.
  private Map<Integer, List<CoordinationProtos.DrillbitEndpoint>> joinMjId2probdeScanEps = new HashMap<>();
  // HashJoin node's major fragment id -> number of runtime filters still expected for
  // that join; decremented in registerRuntimeFilter() as filters arrive.
  private Map<Integer, Integer> joinMjId2scanSize = new ConcurrentHashMap<>();
  // HashJoin node's major fragment id -> the probe side scan node's major fragment id.
  private Map<Integer, Integer> joinMjId2ScanMjId = new HashMap<>();

  // Union of all runtime filters received so far; read/written under synchronized(this)
  // in registerRuntimeFilter().
  private RuntimeFilterWritable aggregatedRuntimeFilter;

  private DrillbitContext drillbitContext;

  private QueryContext queryContext;

  // Tracks in-flight filter sends so waitForComplete() can block until they finish.
  private SendingAccountor sendingAccountor = new SendingAccountor();

  // Platform line separator; used to split and rebuild the text plan.
  private String lineSeparator;



  private static final Logger logger = LoggerFactory.getLogger(RuntimeFilterManager.class);
+
+  /**
+   * This class maintains context for the runtime join push down's filter 
management. It
+   * does a traversal of the physical operators by leveraging the root wrapper 
which indirectly
+   * holds the global PhysicalOperator tree and contains the minor fragment 
endpoints.
+   * @param workUnit
+   * @param queryContext
+   */
+  public RuntimeFilterManager(QueryWorkUnit workUnit, QueryContext 
queryContext, DrillbitContext drillbitContext) {
+    this.rootWrapper = workUnit.getRootWrapper();
+    this.queryContext = queryContext;
+    this.drillbitContext = drillbitContext;
+    lineSeparator = java.security.AccessController.doPrivileged(new 
sun.security.action.GetPropertyAction("line.separator"));
+  }
+
+  /**
+   * Apply runtime filter to the physical plan if possible
+   * @param plan
+   * @param queryContext
+   */
+  public static void applyRuntimeFilter(PhysicalPlan plan, QueryContext 
queryContext) {
+    //TODO except the default configured bloom filter bytes size, we should 
also calculate it by the NDV (number of distinct value)
+    int bloomFilterSizeInBytes = 
queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;
+    boolean enableRuntimeFilter = 
queryContext.getOption(ExecConstants.HASHJOIN_ENABLE_RUNTIME_FILTER_KEY).bool_val;
+    if (!enableRuntimeFilter) {
+      return;
+    }
+    final PhysicalOperator rootOperator = 
plan.getSortedOperators(false).iterator().next();
+    CandidateHashJoinOpIdentifier candidateHashJoinOpIdentifier = new 
CandidateHashJoinOpIdentifier();
+    rootOperator.accept(candidateHashJoinOpIdentifier, null);
+    List<HashJoinOpHolder> qualifiedHolders = 
candidateHashJoinOpIdentifier.qualifiedHolders();
+    //set the RuntimeFilterDef to its corresponding HashJoin node from top to 
down.
+    for (HashJoinOpHolder holder : qualifiedHolders) {
+      boolean sendToSelf = false;
+      if (holder.isBroadcastHashJoin) {
+        // send RuntimeFilter to the join node itself
+        sendToSelf = true;
+      }
+      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, 
bloomFilterSizeInBytes, sendToSelf);
+      RuntimeFilterDef runtimeFilterDef = new RuntimeFilterDef(true, false, 
bloomFilterDefs, !sendToSelf);
+      holder.getHashJoinPOP().setRuntimeFilterDef(runtimeFilterDef);
+    }
+  }
+
+
  /**
   * Finds the HashJoin physical nodes that carry a RuntimeFilterDef and records the
   * routing relationship between the runtime filter producers (join fragments) and
   * consumers (probe side scan fragments). Also annotates the text plan with the
   * runtime filter information for the query profile.
   * @param textPlan pointer to the text plan to annotate; may be null
   */
  public void collectRuntimeFilterControlInfo(Pointer<String> textPlan) {
    Map<String, String> mjOpIdPair2runtimeFilter = new HashMap<>();
    RuntimeFilterParallelismCollector runtimeFilterParallelismCollector = new RuntimeFilterParallelismCollector();
    rootWrapper.getNode().getRoot().accept(runtimeFilterParallelismCollector, null);
    List<HashJoinOpHolder> holders = runtimeFilterParallelismCollector.getHolders();
    int bloomFilterSizeInBytes = queryContext.getOption(ExecConstants.HASHJOIN_BLOOM_FILTER_DEFAULT_SIZE_KEY).int_val;

    for (HashJoinOpHolder holder : holders) {
      List<CoordinationProtos.DrillbitEndpoint> probeSideEndpoints = holder.getProbeSideScanEndpoints();
      int probeSideScanMajorId = holder.getProbeSideScanMajorId();
      int joinNodeMajorId = holder.getJoinMajorId();
      boolean sendToSelf = false;
      if (holder.isBroadcastHashJoin) {
        // broadcast hash join: the filter is applied by the join node itself,
        // so no Foreman routing entry is recorded below
        sendToSelf = true;
      }
      //mark the runtime filter info in the profile's text plan
      int probeSideScanOpId = holder.getProbeSideScanOpId();
      List<BloomFilterDef> bloomFilterDefs = constructBloomFilterDefs(holder, bloomFilterSizeInBytes, sendToSelf);
      // key format matches the "MM-OO" operator-id prefix of the text plan lines
      String mjOpIdPair = String.format("%02d-%02d", probeSideScanMajorId, probeSideScanOpId);
      StringBuilder stringBuilder = new StringBuilder();
      stringBuilder.append("RuntimeFilter[");
      for (BloomFilterDef bloomFilterDef : bloomFilterDefs) {
        // NOTE(review): this leaves a trailing comma before ']' — cosmetic only
        stringBuilder.append(bloomFilterDef.toString()).append(",");
      }
      stringBuilder.append("]");
      String runtimeFiltersJson = stringBuilder.toString();
      mjOpIdPair2runtimeFilter.put(mjOpIdPair, runtimeFiltersJson);
      if (!sendToSelf) {
        //record the routing info so the Foreman can forward aggregated filters
        //to the probe side scan fragments
        joinMjId2probdeScanEps.put(joinNodeMajorId, probeSideEndpoints);
        joinMjId2scanSize.put(joinNodeMajorId, probeSideEndpoints.size());
        joinMjId2ScanMjId.put(joinNodeMajorId, probeSideScanMajorId);
      }
    }
    reconstructTextPlan(textPlan, mjOpIdPair2runtimeFilter);
  }
+
+
  /**
   * Blocks until all in-flight runtime filter sends accounted by
   * {@link #sendingAccountor} have completed.
   */
  public void waitForComplete() {
    sendingAccountor.waitForSendComplete();
  }
+
  /**
   * Passively invoked when a runtime filter arrives from the network. Aggregates the
   * filter into the partial result and, once the expected count for the join's major
   * fragment reaches zero, broadcasts the aggregated filter to the probe side scan nodes.
   * @param runtimeFilterWritable the received runtime filter plus its metadata
   */
  public void registerRuntimeFilter(RuntimeFilterWritable runtimeFilterWritable) {
    BitData.RuntimeFilterBDef runtimeFilterB = runtimeFilterWritable.getRuntimeFilterBDef();
    int majorId = runtimeFilterB.getMajorFragmentId();
    UserBitShared.QueryId queryId = runtimeFilterB.getQueryId();
    List<String> probeFields = runtimeFilterB.getProbeFieldsList();
    logger.info("RuntimeFilterManager receives a runtime filter , majorId:{}, queryId:{}", majorId, QueryIdHelper.getQueryId(queryId));
    int size;
    synchronized (this) {
      // NOTE(review): unboxing get(majorId) throws NPE if this majorId was never
      // registered in collectRuntimeFilterControlInfo — confirm senders are always known
      size = joinMjId2scanSize.get(majorId);
      if (aggregatedRuntimeFilter == null) {
        aggregatedRuntimeFilter = runtimeFilterWritable;
      } else {
        // NOTE(review): all filters fold into one aggregate regardless of which join
        // (majorId) produced them — verify this is safe when multiple hash joins
        // emit runtime filters for the same query
        this.aggregatedRuntimeFilter.aggregate(runtimeFilterWritable);
      }
      size--;
      joinMjId2scanSize.put(majorId, size);
    }
    // size is a local snapshot taken under the lock, so this check is race-free
    if (size == 0) {
      broadcastAggregatedRuntimeFilter(majorId, queryId, probeFields);
    }
  }
+
+
  /**
   * Builds one {@link BloomFilterDef} per join condition of the holder's HashJoin,
   * keyed by the probe side field name extracted from the condition's left expression.
   * @param holder the candidate HashJoin holder
   * @param bloomFilterSizeInBytes configured default bloom filter size
   * @param sendToSelf true when the filter is applied locally (broadcast hash join)
   * @return the bloom filter definitions, one per join condition
   */
  private static List<BloomFilterDef> constructBloomFilterDefs(HashJoinOpHolder holder, int bloomFilterSizeInBytes, boolean sendToSelf) {
    List<JoinCondition> joinConditions = holder.hashJoinPOP.getConditions();
    List<BloomFilterDef> bloomFilterDefs = new ArrayList<>();
    for (JoinCondition joinCondition : joinConditions) {
      LogicalExpression leftProExp = joinCondition.getLeft();
      FieldReferenceFinder fieldReferenceFinder = new FieldReferenceFinder();
      Set<SchemaPath> schemaPaths = leftProExp.accept(fieldReferenceFinder, (Void) null);
      //find the probe side field which corresponds to the BloomFilter
      // NOTE(review): only the first referenced SchemaPath is used; if the probe
      // expression references several fields the others are ignored — confirm intended
      String probeSideField = schemaPaths.iterator().next().getLastSegment().getNameSegment().getPath();
      BloomFilterDef bloomFilterDef = new BloomFilterDef(0, bloomFilterSizeInBytes, sendToSelf, probeSideField);
      bloomFilterDefs.add(bloomFilterDef);
    }
    return bloomFilterDefs;
  }
+
+  private void reconstructTextPlan(Pointer<String> textPlan, Map<String, 
String> mjOpIdPair2runtimeFilter) {
+    if (textPlan != null && textPlan.value != null && 
!mjOpIdPair2runtimeFilter.isEmpty()) {
+      String[] lines = textPlan.value.split(lineSeparator);
+      Set<String> idPairs = mjOpIdPair2runtimeFilter.keySet();
+      StringBuilder stringBuilder = new StringBuilder();
+      for (String line : lines) {
+        for (String idPair : idPairs) {
+          if (line.startsWith(idPair)) {
+            line = line + " : " + mjOpIdPair2runtimeFilter.get(idPair);
+          }
+        }
+        stringBuilder.append(line).append(lineSeparator);
+      }
+      textPlan.value = stringBuilder.toString();
+    }
+  }
+
+  private void broadcastAggregatedRuntimeFilter(int joinMajorId, 
UserBitShared.QueryId queryId, List<String> probeFields) {
+    List<CoordinationProtos.DrillbitEndpoint> scanNodeEps = 
joinMjId2probdeScanEps.get(joinMajorId);
+    int scanNodeMjId = joinMjId2ScanMjId.get(joinMajorId);
+    for (int minorId = 0; minorId < scanNodeEps.size(); minorId++) {
+      BitData.RuntimeFilterBDef.Builder builder = 
BitData.RuntimeFilterBDef.newBuilder();
+      for (String probeField : probeFields) {
+        builder.addProbeFields(probeField);
+      }
+      BitData.RuntimeFilterBDef runtimeFilterBDef = builder
+        .setQueryId(queryId)
+        .setMajorFragmentId(scanNodeMjId)
+        .setMinorFragmentId(minorId)
+        .build();
+      RuntimeFilterWritable runtimeFilterWritable = new 
RuntimeFilterWritable();
+      runtimeFilterWritable.setRuntimeFilterBDef(runtimeFilterBDef);
+      runtimeFilterWritable.setData(aggregatedRuntimeFilter.getData());
+
+      CoordinationProtos.DrillbitEndpoint drillbitEndpoint = 
scanNodeEps.get(minorId);
+
+      DataTunnel dataTunnel = 
drillbitContext.getDataConnectionsPool().getTunnel(drillbitEndpoint);
+      Consumer<RpcException> exceptionConsumer = new Consumer<RpcException>() {
+        @Override
+        public void accept(final RpcException e) {
+          //logger.error("fail to broadcast a runtime filter to the probe side 
scan node", e);
+        }
+
+        @Override
+        public void interrupt(final InterruptedException e) {
+          //logger.error("fail to broadcast a runtime filter to the probe side 
scan node", e);
+        }
+      };
+      RpcOutcomeListener<GeneralRPCProtos.Ack> statusHandler = new 
StatusHandler(exceptionConsumer, sendingAccountor);
+      AccountingDataTunnel accountingDataTunnel = new 
AccountingDataTunnel(dataTunnel, sendingAccountor, statusHandler);
+      accountingDataTunnel.sendRuntimeFilter(runtimeFilterWritable);
+    }
+  }
+
  /**
   * Visitor that finds all candidate HashJoin nodes for runtime filter push down in
   * the physical plan. A candidate join must be an equi-join of INNER or RIGHT type,
   * and its probe (left) side must not contain a blocking operator (HashAgg,
   * StreamingAgg, or a nested HashJoin).
   */
  private static class CandidateHashJoinOpIdentifier extends AbstractPhysicalVisitor<Void, HashJoinOpHolder, RuntimeException> {

    // Candidates discovered so far; filtered by qualifiedHolders().
    private List<HashJoinOpHolder> possibleHolders = new ArrayList<>();

    @Override
    public Void visitExchange(Exchange exchange, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null) {
        boolean broadcastExchange = exchange instanceof BroadcastExchange;
        if (holder.isFromBuildSide()) {
          // On the build side, a Broadcast exchange among the HashJoin's children
          // marks this HashJoin as a broadcast hash join.
          holder.setMeetBroadcastExchange(broadcastExchange);
        }
      }
      return visitOp(exchange, holder);
    }


    @Override
    public Void visitGroupScan(GroupScan groupScan, HashJoinOpHolder holder) throws RuntimeException {
      // Record the probe side scan operator; it is the runtime filter's target.
      if (holder != null && holder.isFromProbeSide()) {
        holder.setProbeSideScanOp(groupScan);
      }
      return visitOp(groupScan, holder);
    }


    @Override
    public Void visitStore(Store store, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null && holder.isFromProbeSide()) {
        holder.setProbeSideScanOp(store);
      }
      return visitOp(store, holder);
    }


    @Override
    public Void visitStreamingAggregate(StreamingAggregate agg, HashJoinOpHolder holder) throws RuntimeException {
      // An aggregate on the probe side blocks push down: mark the holder disqualified.
      if (holder != null && holder.isFromProbeSide()) {
        holder.setMeetBlockedOperator(true);
      }
      return visitOp(agg, holder);
    }


    @Override
    public Void visitHashAggregate(HashAggregate agg, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null && holder.isFromProbeSide()) {
        holder.setMeetBlockedOperator(true);
      }
      return visitOp(agg, holder);
    }


    @Override
    public Void visitOp(PhysicalOperator op, HashJoinOpHolder holder) throws RuntimeException {
      boolean isHashJoinOp = op instanceof HashJoinPOP;
      if (isHashJoinOp) {
        HashJoinPOP hashJoinPOP = (HashJoinPOP) op;
        JoinRelType joinType = hashJoinPOP.getJoinType();
        boolean isEqJoin = isEqJoin(hashJoinPOP);
        if (isEqJoin && (joinType == JoinRelType.INNER || joinType == JoinRelType.RIGHT)) {
          //it's not right to filter out rows of a left outer or full outer join
          if (holder == null) {
            holder = new HashJoinOpHolder();
            holder.setHashJoinPOP(hashJoinPOP);
            possibleHolders.add(holder);
          } else {
            //parent HashJoin node has a child HashJoin node: disqualify the parent
            // NOTE(review): the parent's holder is then reused below for the nested
            // join, overwriting its joinOpId — confirm this reuse is intended
            holder.setMeetBlockedOperator(true);
          }
          int joinOpId = hashJoinPOP.getOperatorId();
          holder.setJoinOpId(joinOpId);
          //left probe side tree should contain a scan node and must not contain
          //blocking nodes like Agg
          org.apache.drill.exec.physical.base.PhysicalOperator left = hashJoinPOP.getLeft();
          holder.setFromProbeSide(true);
          left.accept(this, holder);
          boolean meetBlockedNode = holder.isMeetBlockedOperator();
          if (!meetBlockedNode) {
            PhysicalOperator probeSideScanOp = holder.getProbeSideScanOp();
            int probeSideScanOpId = probeSideScanOp.getOperatorId();
            holder.setProbeSideScanOpId(probeSideScanOpId);
          }
          //explore the right build side children to find potential RuntimeFilters
          //and Broadcast exchanges
          PhysicalOperator right = hashJoinPOP.getRight();
          holder.setFromBuildSide(true);
          right.accept(this, holder);
          return null;
        }
      }
      return visitChildren(op, holder);
    }

    /**
     * Returns the candidates whose probe side contains no blocking operator,
     * marking broadcast hash joins along the way.
     */
    public List<HashJoinOpHolder> qualifiedHolders() {
      List<HashJoinOpHolder> qualifiedHolders = new ArrayList<>();
      for (HashJoinOpHolder candidate : possibleHolders) {
        boolean meetProbeSideBlockedOp = candidate.isMeetBlockedOperator();
        if (!meetProbeSideBlockedOp) {
          boolean meetBroadcastExchangeBuildSide = candidate.isMeetBroadcastExchange();
          if (meetBroadcastExchangeBuildSide) {
            candidate.setBroadcastHashJoin(true);
          }
          //TODO we need to use NDV to calculate the percent not the cost.
          /**
          double threshold = 0.5;
          double buildCost = candidate.hashJoinPOP.getRight().getCost();
          double probeCost = candidate.hashJoinPOP.getLeft().getCost();
          double percent = buildCost / probeCost;
          if (percent > threshold) {
            continue;
          }
           */
          qualifiedHolders.add(candidate);
        }
      }
      return qualifiedHolders;
    }
  }
+
  /**
   * Collects the runtime filter parallelism related information: the join node's
   * major fragment id, the probe side scan node's major fragment id and operator id,
   * and the probe side scan's assigned endpoints. Visits only HashJoin nodes that
   * already carry a RuntimeFilterDef (set earlier by applyRuntimeFilter).
   */
  protected class RuntimeFilterParallelismCollector extends AbstractPhysicalVisitor<Void, HashJoinOpHolder, RuntimeException> {

    // Holders for each RuntimeFilterDef-carrying HashJoin found in the plan.
    private List<HashJoinOpHolder> possibleHolders = new ArrayList<>();

    @Override
    public Void visitExchange(Exchange exchange, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null) {
        boolean broadcastExchange = exchange instanceof BroadcastExchange;
        if (holder.isFromBuildSide()) {
          // On the build side, a Broadcast exchange among the HashJoin's children
          // marks this HashJoin as a broadcast hash join.
          holder.setMeetBroadcastExchange(broadcastExchange);
        }
      }
      return visitOp(exchange, holder);
    }


    @Override
    public Void visitGroupScan(GroupScan groupScan, HashJoinOpHolder holder) throws RuntimeException {
      // Record the probe side scan operator; it is the runtime filter's target.
      if (holder != null && holder.isFromProbeSide()) {
        holder.setProbeSideScanOp(groupScan);
      }
      return visitOp(groupScan, holder);
    }


    @Override
    public Void visitStore(Store store, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null && holder.isFromProbeSide()) {
        holder.setProbeSideScanOp(store);
      }
      return visitOp(store, holder);
    }


    @Override
    public Void visitStreamingAggregate(StreamingAggregate agg, HashJoinOpHolder holder) throws RuntimeException {
      // An aggregate on the probe side blocks push down: mark the holder disqualified.
      if (holder != null && holder.isFromProbeSide()) {
        holder.setMeetBlockedOperator(true);
      }
      return visitOp(agg, holder);
    }


    @Override
    public Void visitHashAggregate(HashAggregate agg, HashJoinOpHolder holder) throws RuntimeException {
      if (holder != null && holder.isFromProbeSide()) {
        holder.setMeetBlockedOperator(true);
      }
      return visitOp(agg, holder);
    }


    @Override
    public Void visitOp(PhysicalOperator op, HashJoinOpHolder holder) throws RuntimeException {
      boolean isHashJoinOp = op instanceof HashJoinPOP;
      if (isHashJoinOp) {
        HashJoinPOP hashJoinPOP = (HashJoinPOP) op;
        RuntimeFilterDef runtimeFilterDef = hashJoinPOP.getRuntimeFilterDef();
        if (runtimeFilterDef != null) {
          if (holder == null) {
            holder = new HashJoinOpHolder();
            holder.setHashJoinPOP(hashJoinPOP);
            possibleHolders.add(holder);
          } else {
            //parent HashJoin node has a child HashJoin node: disqualify the parent
            // NOTE(review): the parent's holder is then reused below for the nested
            // join, overwriting its major/operator ids — confirm this reuse is intended
            holder.setMeetBlockedOperator(true);
          }
          // Locate the wrapper (fragment) that contains this HashJoin operator.
          Wrapper container = findPhysicalOpContainer(rootWrapper, hashJoinPOP);
          int majorFragmentId = container.getMajorFragmentId();
          int joinOpId = hashJoinPOP.getOperatorId();
          holder.setJoinMajorId(majorFragmentId);
          holder.setJoinOpId(joinOpId);
          //left probe side tree should contain a scan node and must not contain
          //blocking nodes like Agg
          org.apache.drill.exec.physical.base.PhysicalOperator left = hashJoinPOP.getLeft();
          holder.setFromProbeSide(true);
          left.accept(this, holder);
          boolean meetBlockedNode = holder.isMeetBlockedOperator();
          if (!meetBlockedNode) {
            PhysicalOperator probeSideScanOp = holder.getProbeSideScanOp();
            // Resolve the probe side scan's fragment, starting from the join's
            // container and following its fragment dependencies.
            Wrapper probeSideScanContainer = findPhysicalOpContainer(container, probeSideScanOp);
            int probeSideScanMjId = probeSideScanContainer.getMajorFragmentId();
            int probeSideScanOpId = probeSideScanOp.getOperatorId();
            List<CoordinationProtos.DrillbitEndpoint> probeSideScanEps = probeSideScanContainer.getAssignedEndpoints();
            holder.setProbeSideScanEndpoints(probeSideScanEps);
            holder.setProbeSideScanMajorId(probeSideScanMjId);
            holder.setProbeSideScanOpId(probeSideScanOpId);
          }
          //explore the right build side children to find potential RuntimeFilters
          //and Broadcast exchanges
          PhysicalOperator right = hashJoinPOP.getRight();
          holder.setFromBuildSide(true);
          right.accept(this, holder);
          return null;
        }
      }
      return visitChildren(op, holder);
    }

    /** Returns all holders collected during the traversal. */
    public List<HashJoinOpHolder> getHolders() {
      return possibleHolders;
    }
  }
+
+  private class WrapperOperatorsVisitor extends AbstractPhysicalVisitor<Void, 
Void, RuntimeException> {
+
+    private PhysicalOperator targetOp;
+
+    private Fragment fragment;
+
+    private boolean contain = false;
+
+    public WrapperOperatorsVisitor(PhysicalOperator targetOp, Fragment 
fragment) {
+      this.targetOp = targetOp;
+      this.fragment = fragment;
+    }
+
+    @Override
+    public Void visitExchange(Exchange exchange, Void value) throws 
RuntimeException {
+      List<Fragment.ExchangeFragmentPair> exchangeFragmentPairs = 
fragment.getReceivingExchangePairs();
+      for (Fragment.ExchangeFragmentPair exchangeFragmentPair : 
exchangeFragmentPairs) {
+        boolean same = exchange == exchangeFragmentPair.getExchange();
+        if (same) {
+          return null;
+        }
+      }
+      return exchange.getChild().accept(this, value);
+    }
+
+    @Override
+    public Void visitOp(PhysicalOperator op, Void value) throws 
RuntimeException {
+      boolean same = op == targetOp;
+      if (!same) {
+        for (PhysicalOperator child : op) {
+          child.accept(this, value);
+        }
+      } else {
+        contain = true;
+      }
+      return null;
+    }
+
+    public boolean isContain() {
+      return contain;
+    }
+  }
+
+  private boolean containsPhysicalOperator(Wrapper wrapper, PhysicalOperator 
op) {
+    WrapperOperatorsVisitor wrapperOpsVistitor = new 
WrapperOperatorsVisitor(op, wrapper.getNode());
+    wrapper.getNode().getRoot().accept(wrapperOpsVistitor, null);
+    return wrapperOpsVistitor.isContain();
+  }
+
+  private Wrapper findPhysicalOpContainer(Wrapper wrapper, PhysicalOperator 
op) {
+    boolean contain = containsPhysicalOperator(wrapper, op);
+    if (contain) {
+      return wrapper;
+    }
+    List<Wrapper> dependencies = wrapper.getFragmentDependencies();
+    if (CollectionUtils.isEmpty(dependencies)) {
+      return null;
+    }
+    for (Wrapper dependencyWrapper : dependencies) {
+      Wrapper opContainer = findPhysicalOpContainer(dependencyWrapper, op);
+      if (opContainer != null) {
+        return opContainer;
+      }
+    }
+    //should not be here
+    return null;
+  }
+
+  private static boolean isEqJoin(HashJoinPOP joinPOP) {
+    List<JoinCondition> joinConditions = joinPOP.getConditions();
+    for (JoinCondition joinCondition : joinConditions) {
+      String relationShip = joinCondition.getRelationship();
+      boolean equalRelationShip = "==".equals(relationShip) || 
"EQUALS".equalsIgnoreCase(relationShip);
+      if (!equalRelationShip) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  private static class HashJoinOpHolder {
+
+    private int joinOpId;
+
+    private int joinMajorId;
+
+    private HashJoinPOP hashJoinPOP;
+
+    private int probeSideScanMajorId;
+
+    private int probeSideScanOpId;
+
+    private PhysicalOperator probeSideScanOp;
+
+    private List<CoordinationProtos.DrillbitEndpoint> probeSideScanEndpoints;
+
+    //probe side's children have blocked nodes.
+    private boolean meetBlockedOperator;
+
+    //whethe this join operator is a partitioned HashJoin or broadcast HashJoin
 
 Review comment:
   Yeah, it would be fine to not do JPPD for that case .. we can document the 
expected behavior.  

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to