amansinha100 commented on a change in pull request #1334: DRILL-6385: Support JPPD feature
URL: https://github.com/apache/drill/pull/1334#discussion_r199354240
 
 

 ##########
 File path: exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
 ##########
 @@ -226,6 +244,96 @@ public IterOutcome next() {
     }
   }
 
+  private void applyRuntimeFilter() throws SchemaChangeException {
+    RuntimeFilterWritable runtimeFilterWritable = context.getRuntimeFilter();
+    if (runtimeFilterWritable == null) {
+      return;
+    }
+    if (recordCount <= 0) {
+      return;
+    }
+    List<BloomFilter> bloomFilters = runtimeFilterWritable.unwrap();
+    if (hash64 == null) {
 +      ValueVectorHashHelper hashHelper = new ValueVectorHashHelper(this, context);
+      try {
+        //generate hash helper
 +        this.toFilterFields = runtimeFilterWritable.getRuntimeFilterBDef().getProbeFieldsList();
+        List<LogicalExpression> hashFieldExps = new ArrayList<>();
+        List<TypedFieldId> typedFieldIds = new ArrayList<>();
+        for (String toFilterField : toFilterFields) {
 +          SchemaPath schemaPath = new SchemaPath(new PathSegment.NameSegment(toFilterField), ExpressionPosition.UNKNOWN);
+          TypedFieldId typedFieldId = container.getValueVectorId(schemaPath);
+          this.field2id.put(toFilterField, typedFieldId.getFieldIds()[0]);
+          typedFieldIds.add(typedFieldId);
 +          ValueVectorReadExpression toHashFieldExp = new ValueVectorReadExpression(typedFieldId);
+          hashFieldExps.add(toHashFieldExp);
+        }
 +        hash64 = hashHelper.getHash64(hashFieldExps.toArray(new LogicalExpression[hashFieldExps.size()]), typedFieldIds.toArray(new TypedFieldId[typedFieldIds.size()]));
+      } catch (Exception e) {
+        throw UserException.internalError(e).build(logger);
+      }
+    }
+    selectionVector2.allocateNew(recordCount);
+    BitSet bitSet = new BitSet(recordCount);
+    for (int i = 0; i < toFilterFields.size(); i++) {
+      BloomFilter bloomFilter = bloomFilters.get(i);
+      String fieldName = toFilterFields.get(i);
+      computeBitSet(field2id.get(fieldName), bloomFilter, bitSet);
+    }
+    int svIndex = 0;
+    int tmpFilterRows = 0;
+    for (int i = 0; i < recordCount; i++) {
+      boolean contain = bitSet.get(i);
+      if (contain) {
+        selectionVector2.setIndex(svIndex, i);
+        svIndex++;
+      } else {
+        tmpFilterRows++;
+      }
+    }
+    selectionVector2.setRecordCount(svIndex);
+    if (tmpFilterRows > 0 && tmpFilterRows == recordCount) {
+      recordCount = 0;
+      selectionVector2.clear();
+      logger.debug("filter {} rows by the RuntimeFilter", tmpFilterRows);
+      return;
+    }
 +    if (tmpFilterRows > 0 && tmpFilterRows != recordCount) {
+      totalFilterRows = totalFilterRows + tmpFilterRows;
+      recordCount = svIndex;
+      BatchSchema batchSchema = this.schema;
 +      VectorContainer backUpContainer = new VectorContainer(this.oContext.getAllocator(), batchSchema);
+      int fieldCount = batchSchema.getFieldCount();
+      for (int i = 0; i < fieldCount; i++) {
 +        ValueVector from = this.getContainer().getValueVector(i).getValueVector();
+        ValueVector to = backUpContainer.getValueVector(i).getValueVector();
+        to.setInitialCapacity(svIndex);
+        for (int r = 0; r < svIndex; r++) {
+          to.copyEntry(r, from, selectionVector2.getIndex(r));
 
 Review comment:
   To summarize your implementation of this part: suppose the original ScanBatch contained 100 rows and 10 of them passed the bloom filter. You create an SV2 of size 10, record each qualifying row's original index in the SV2, then for each ValueVector copy the qualifying rows' data into the backupContainer, and finally exchange it with the current output container. A couple of thoughts about this:
     - Why not produce the SV2 in the output batch and let the downstream operator handle it (see the sketch after this list)? Quite often there may be a Filter operator above the Scan which would be applying other filters (i.e., not the run-time filters), and it could combine its own SV2 with the SV2 produced by the Scan. That way you avoid the extra copying and let Filter handle it (note that Filter does code-gen, so it is a more efficient way to handle bulk filtering).
    - There are clearly trade-offs with the extra-copy approach: it depends on selectivity, i.e., how many rows actually get eliminated by the run-time filter. I suppose you have mentioned this as a TODO depending on the NDV statistics?
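  To make the first point concrete, here is a minimal, self-contained sketch of the SV2 pass-through idea. It is deliberately not Drill code: `RowSelection`, `runtimeFilterPass`, and `downstreamFilterPass` are hypothetical stand-ins for SelectionVector2 and the scan/filter operators, and the bloom filter is modeled as a plain predicate.

```java
import java.util.function.IntPredicate;

public class Sv2PassThroughSketch {

  /** Hypothetical stand-in for Drill's SelectionVector2: a list of qualifying row indices. */
  static final class RowSelection {
    final int[] indices;
    final int count;
    RowSelection(int[] indices, int count) {
      this.indices = indices;
      this.count = count;
    }
  }

  /** Scan side: apply the run-time (bloom) filter by recording indices, not copying rows. */
  static RowSelection runtimeFilterPass(long[] hashColumn, IntPredicate mightContain) {
    int[] sel = new int[hashColumn.length];
    int n = 0;
    for (int i = 0; i < hashColumn.length; i++) {
      // A bloom filter has no false negatives, so rows it rejects are safe to drop.
      if (mightContain.test((int) hashColumn[i])) {
        sel[n++] = i;
      }
    }
    return new RowSelection(sel, n);
  }

  /** Downstream Filter: AND its own predicate with the incoming selection, still copy-free. */
  static RowSelection downstreamFilterPass(RowSelection incoming, IntPredicate ownPredicate) {
    int[] sel = new int[incoming.count];
    int n = 0;
    for (int i = 0; i < incoming.count; i++) {
      int row = incoming.indices[i];  // one extra indirection per access
      if (ownPredicate.test(row)) {
        sel[n++] = row;               // indices only; the column data is never copied
      }
    }
    return new RowSelection(sel, n);
  }

  public static void main(String[] args) {
    long[] hashes = {11, 22, 33, 44, 55};
    // Pretend the bloom filter only "contains" even hash values.
    RowSelection fromScan = runtimeFilterPass(hashes, h -> h % 2 == 0);
    // Pretend the Filter operator additionally keeps rows whose index is > 1.
    RowSelection combined = downstreamFilterPass(fromScan, row -> row > 1);
    System.out.println("rows after scan-side runtime filter: " + fromScan.count);  // 2
    System.out.println("rows after downstream filter:        " + combined.count); // 1
  }
}
```

  The selectivity trade-off from the second point is visible in this sketch: the pass-through variant pays one extra indirection per downstream access, while the extra-copy variant pays a one-time per-column copy but hands compacted vectors to every operator above. Which one wins depends on how many rows actually survive the run-time filter.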
