siddharthteotia commented on a change in pull request #4535: Implement DISTINCT 
clause
URL: https://github.com/apache/incubator-pinot/pull/4535#discussion_r320654817
 
 

 ##########
 File path: 
pinot-core/src/main/java/org/apache/pinot/core/query/aggregation/DistinctTable.java
 ##########
 @@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.query.aggregation;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.utils.DataSchema;
+import org.apache.pinot.common.utils.DataTable;
+import org.apache.pinot.core.common.datatable.DataTableBuilder;
+import org.apache.pinot.core.common.datatable.DataTableFactory;
+import org.apache.pinot.core.data.table.Key;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This serves the following purposes:
+ *
+ * (1) Intermediate result object for Distinct aggregation function
+ * (2) The same object is serialized by the server inside the data table
+ * for sending the results to broker. Broker deserializes it.
+ */
+public class DistinctTable {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(DistinctTable.class);
+  private final static int INITIAL_CAPACITY = 64000;
+  private FieldSpec.DataType[] _projectedColumnTypes;
+  private String[] _projectedColumnNames;
+  private int _recordLimit;
+  private Set<Key> _table;
+
+  /**
+   * Add a row to hash table
+   * @param key multi-column key to add
+   */
+  public void addKey(final Key key) {
+    if (_table.size() >= _recordLimit) {
+      LOGGER.info("Distinct table Reached allowed max cardinality of {}", 
_recordLimit);
+      return;
+    }
+
+    _table.add(key);
+  }
+
+  public DistinctTable(int recordLimit) {
+    _recordLimit = recordLimit;
+    _table = new HashSet<>(INITIAL_CAPACITY);
+  }
+
+  /**
+   * DESERIALIZE: Broker side
+   * @param byteBuffer data to deserialize
+   * @throws IOException
+   */
+  public DistinctTable(final ByteBuffer byteBuffer) throws IOException {
+    final DataTable dataTable = DataTableFactory.getDataTable(byteBuffer);
+    final DataSchema dataSchema = dataTable.getDataSchema();
+    final int numRows = dataTable.getNumberOfRows();
+    final int numColumns = dataSchema.size();
+
+    _table = new HashSet<>();
+
+    // extract rows from the datatable
+    for (int rowIndex  = 0; rowIndex < numRows; rowIndex++) {
+     Object[] columnValues = new Object[numColumns];
+      for (int colIndex = 0; colIndex < numColumns; colIndex++) {
+        DataSchema.ColumnDataType columnDataType = 
dataSchema.getColumnDataType(colIndex);
+        switch (columnDataType) {
+          case INT:
+            columnValues[colIndex] = dataTable.getInt(rowIndex, colIndex);
+            break;
+          case LONG:
+            columnValues[colIndex] = dataTable.getLong(rowIndex, colIndex);
+            break;
+          case STRING:
+            columnValues[colIndex] = dataTable.getString(rowIndex, colIndex);
+            break;
+          case FLOAT:
+            columnValues[colIndex] = dataTable.getFloat(rowIndex, colIndex);
+            break;
+          case DOUBLE:
+            columnValues[colIndex] = dataTable.getDouble(rowIndex, colIndex);
+            break;
+          default:
+            throw new UnsupportedOperationException("Unexpected column data 
type " + columnDataType + " in data table for DISTINCT query");
+        }
+      }
+
+      _table.add(new Key(columnValues));
+    }
+
+    _projectedColumnNames = dataSchema.getColumnNames();
+    _projectedColumnTypes = buildDataTypesFromColumnTypes(dataSchema);
+  }
+
+  private FieldSpec.DataType[] buildDataTypesFromColumnTypes(final DataSchema 
dataSchema) {
+    final int numColumns = dataSchema.size();
+    final FieldSpec.DataType[] columnTypes = new 
FieldSpec.DataType[numColumns];
+    for (int colIndex = 0; colIndex < numColumns; colIndex++) {
+      DataSchema.ColumnDataType columnDataType = 
dataSchema.getColumnDataType(colIndex);
+      switch (columnDataType) {
+        case INT:
+          columnTypes[colIndex] = FieldSpec.DataType.INT;
+          break;
+        case LONG:
+          columnTypes[colIndex] = FieldSpec.DataType.INT;
 
 Review comment:
   In fact this is not needed for deser. Removed the method
   
   > Please add a unit test for DistinctTable
   > 
   > Bonus points if you can also cover BrokerReduceService via unit tests
   
   Tests added in DistinctQueriesTest test the entire round trip of planning 
the query and executing from the broker -- thus all levels of execution code 
are exercised; segment, server level merge and broker level merge. Similar 
tests are also added in InterSegmentAggregationTests and 
OfflineClusterIntegrationTest
   
   The tests exercise each and every method of DistinctTable -- adding the key, 
iterator, serialization by server to send DataTable to broker and 
deserialization at broker to do the merge and then set broker response. As part 
of asserts in these tests, I check that distinct table works fine (both actual 
data and metadata like column names and types) by comparing the end result (at 
the broker level and segment level) with the expected table that the test code 
builds as it generates custom data. So DistinctTable's code has 100% 
coverage through the tests added, as follows:
   
   DistinctQueriesTest.java
   
   - Exercises segment level execution and checks the data (unique rows) and 
metadata (column names and types) in Distinct Table.
   - Exercises full inter segment and inter server execution via broker and 
checks the broker response data (actual unique rows) and metadata (column 
names) -- this broker response is set after deserializing the DistinctTable and 
doing the reduce/merge
   
   OfflineClusterIntegrationTest.java
   
   - Exercises full inter segment and inter server execution via broker and 
compares the result against H2
   
   InnerSegmentAggregationSingleValueQueriesTest
   
   - Exercises segment level execution and checks the data (unique rows) and 
metadata (column names and types) in Distinct Table.
   
   InterSegmentAggregationSingleValueQueriesTest
   
   - Exercises full inter segment and inter server execution via broker and 
checks the broker response data (actual unique rows) and metadata (column 
names) -- this broker response is set after deserializing the DistinctTable and 
doing the reduce/merge

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to