[ 
https://issues.apache.org/jira/browse/DRILL-5337?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16224932#comment-16224932
 ] 

ASF GitHub Bot commented on DRILL-5337:
---------------------------------------

Github user arina-ielchiieva commented on a diff in the pull request:

    https://github.com/apache/drill/pull/774#discussion_r146134712
  
    --- Diff: 
contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBRecordReader.java
 ---
    @@ -0,0 +1,263 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.drill.exec.store.openTSDB;
    +
    +import com.google.common.collect.ImmutableList;
    +import com.google.common.collect.ImmutableMap;
    +import org.apache.drill.common.exceptions.ExecutionSetupException;
    +import org.apache.drill.common.exceptions.UserException;
    +import org.apache.drill.common.expression.SchemaPath;
    +import org.apache.drill.common.logical.ValidationError;
    +import org.apache.drill.common.types.TypeProtos;
    +import org.apache.drill.common.types.TypeProtos.MajorType;
    +import org.apache.drill.common.types.TypeProtos.MinorType;
    +import org.apache.drill.common.types.Types;
    +import org.apache.drill.exec.exception.SchemaChangeException;
    +import org.apache.drill.exec.expr.TypeHelper;
    +import org.apache.drill.exec.ops.OperatorContext;
    +import org.apache.drill.exec.physical.impl.OutputMutator;
    +import org.apache.drill.exec.record.MaterializedField;
    +import org.apache.drill.exec.store.AbstractRecordReader;
    +import org.apache.drill.exec.store.openTSDB.client.OpenTSDBTypes;
    +import org.apache.drill.exec.store.openTSDB.client.Schema;
    +import org.apache.drill.exec.store.openTSDB.client.Service;
    +import org.apache.drill.exec.store.openTSDB.dto.ColumnDTO;
    +import org.apache.drill.exec.store.openTSDB.dto.MetricDTO;
    +import org.apache.drill.exec.vector.NullableFloat8Vector;
    +import org.apache.drill.exec.vector.NullableTimeStampVector;
    +import org.apache.drill.exec.vector.NullableVarCharVector;
    +import org.apache.drill.exec.vector.ValueVector;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
    +import java.io.IOException;
    +import java.nio.ByteBuffer;
    +import java.util.Iterator;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Set;
    +
    +import static java.nio.charset.StandardCharsets.UTF_8;
    +
    +public class OpenTSDBRecordReader extends AbstractRecordReader {
    +
    +  private static final Logger log = 
LoggerFactory.getLogger(OpenTSDBRecordReader.class);
    +
    +  private static final Map<OpenTSDBTypes, MinorType> TYPES;
    +
    +  private Service db;
    +
    +  private Iterator<MetricDTO> tableIterator;
    +  private OutputMutator output;
    +  private ImmutableList<ProjectedColumnInfo> projectedCols;
    +  private OpenTSDBSubScan.OpenTSDBSubScanSpec subScanSpec;
    +
  /**
   * Creates a reader for one openTSDB sub-scan.
   *
   * @param client           openTSDB service used to run the REST queries
   * @param subScanSpec      sub-scan spec carrying the table name to read
   * @param projectedColumns columns requested by the query; passed to
   *                         {@link #setColumns} for projection handling
   * @throws IOException if setting up the query parameters fails
   */
  OpenTSDBRecordReader(Service client, OpenTSDBSubScan.OpenTSDBSubScanSpec subScanSpec,
                       List<SchemaPath> projectedColumns) throws IOException {
    setColumns(projectedColumns);
    this.db = client;
    this.subScanSpec = subScanSpec;
    // Prime the service with the table name before any data is fetched.
    db.setupQueryParameters(subScanSpec.getTableName());
    log.debug("Scan spec: {}", subScanSpec);
  }
    +
  /**
   * Fetches the metric tables from openTSDB and prepares the iterator consumed
   * by {@link #next()}.
   *
   * @param context operator context (not used by this reader)
   * @param output  mutator used later to create and fill value vectors
   * @throws ExecutionSetupException declared by the interface; this
   *         implementation actually throws {@link ValidationError} when the
   *         table is missing or empty
   */
  @Override
  public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
    this.output = output;
    Set<MetricDTO> tables = db.getTablesFromDB();
    if (tables == null || tables.isEmpty()) {
      // NOTE(review): ValidationError is a logical-plan validation type; a
      // UserException may be more appropriate at read time — confirm against
      // Drill conventions.
      throw new ValidationError(String.format("Table '%s' not found or it's empty", subScanSpec.getTableName()));
    }
    this.tableIterator = tables.iterator();
  }
    +
    +  @Override
    +  public int next() {
    +    try {
    +      return processOpenTSDBTablesData();
    +    } catch (SchemaChangeException e) {
    +      log.info(e.toString());
    +      return 0;
    +    }
    +  }
    +
  // Redundant override: it only delegates to the superclass implementation
  // and changes neither behavior nor visibility. Kept as-is for interface
  // stability; consider removing it.
  @Override
  protected boolean isSkipQuery() {
    return super.isSkipQuery();
  }
    +
  /**
   * No-op: this reader holds no resources of its own. The {@link Service}
   * lifecycle is presumably owned by the storage plugin — TODO confirm.
   */
  @Override
  public void close() throws Exception {
  }
    +
    +  static {
    +    TYPES = ImmutableMap.<OpenTSDBTypes, MinorType>builder()
    +        .put(OpenTSDBTypes.STRING, MinorType.VARCHAR)
    +        .put(OpenTSDBTypes.DOUBLE, MinorType.FLOAT8)
    +        .put(OpenTSDBTypes.TIMESTAMP, MinorType.TIMESTAMP)
    +        .build();
    +  }
    +
  /**
   * Pairs a Drill value vector with the openTSDB column it is populated from.
   */
  private static class ProjectedColumnInfo {
    ValueVector vv;           // destination vector for this column's values
    ColumnDTO openTSDBColumn; // source column metadata (name and type)
  }
    +
    +  private int processOpenTSDBTablesData() throws SchemaChangeException {
    +    int rowCounter = 0;
    +    while (tableIterator.hasNext()) {
    +      MetricDTO metricDTO = tableIterator.next();
    +      rowCounter = addRowResult(metricDTO, rowCounter);
    +    }
    +    return rowCounter;
    +  }
    +
    +  private int addRowResult(MetricDTO table, int rowCounter) throws 
SchemaChangeException {
    +    setupProjectedColsIfItNull();
    +    for (String time : table.getDps().keySet()) {
    +      String value = table.getDps().get(time);
    +      setupDataToDrillTable(table, time, value, table.getTags(), 
rowCounter);
    +      rowCounter++;
    +    }
    +    return rowCounter;
    +  }
    +
    +  private void setupProjectedColsIfItNull() throws SchemaChangeException {
    +    if (projectedCols == null) {
    +      initCols(new Schema(db, subScanSpec.getTableName()));
    +    }
    +  }
    +
    +  private void setupDataToDrillTable(MetricDTO table, String timestamp, 
String value, Map<String, String> tags, int rowCount) {
    +    for (ProjectedColumnInfo pci : projectedCols) {
    +      switch (pci.openTSDBColumn.getColumnName()) {
    +        case "metric":
    +          setStringColumnValue(table.getMetric(), pci, rowCount);
    +          break;
    +        case "aggregate tags":
    +          setStringColumnValue(table.getAggregateTags().toString(), pci, 
rowCount);
    +          break;
    +        case "timestamp":
    +          setTimestampColumnValue(timestamp, pci, rowCount);
    +          break;
    +        case "aggregated value":
    +          setDoubleColumnValue(value, pci, rowCount);
    +          break;
    +        default:
    +          
setStringColumnValue(tags.get(pci.openTSDBColumn.getColumnName()), pci, 
rowCount);
    +      }
    +    }
    +  }
    +
    +  private void setTimestampColumnValue(String timestamp, 
ProjectedColumnInfo pci, int rowCount) {
    +    setTimestampColumnValue(timestamp != null ? Long.parseLong(timestamp) 
: Long.parseLong("0"), pci, rowCount);
    +  }
    +
    +  private void setDoubleColumnValue(String value, ProjectedColumnInfo pci, 
int rowCount) {
    +    setDoubleColumnValue(value != null ? Double.parseDouble(value) : 0.0, 
pci, rowCount);
    +  }
    +
  /** Writes a UTF-8 string into the VARCHAR vector at the given row. */
  private void setStringColumnValue(String data, ProjectedColumnInfo pci, int rowCount) {
    if (data == null) {
      // NOTE(review): this writes the literal string "null" rather than a
      // SQL NULL into a *nullable* vector — confirm this representation of
      // absent values is intended.
      data = "null";
    }
    ByteBuffer value = ByteBuffer.wrap(data.getBytes(UTF_8));
    ((NullableVarCharVector.Mutator) pci.vv.getMutator())
        .setSafe(rowCount, value, 0, value.remaining());
  }
    +
    +  private void setTimestampColumnValue(Long data, ProjectedColumnInfo pci, 
int rowCount) {
    +    ((NullableTimeStampVector.Mutator) pci.vv.getMutator())
    +        .setSafe(rowCount, data * 1000);
    +  }
    +
    +  private void setDoubleColumnValue(Double data, ProjectedColumnInfo pci, 
int rowCount) {
    +    ((NullableFloat8Vector.Mutator) pci.vv.getMutator())
    +        .setSafe(rowCount, data);
    +  }
    +
    +  private void initCols(Schema schema) throws SchemaChangeException {
    +    ImmutableList.Builder<ProjectedColumnInfo> pciBuilder = 
ImmutableList.builder();
    +
    +    for (int i = 0; i < schema.getColumnCount(); i++) {
    +
    +      ColumnDTO column = schema.getColumnByIndex(i);
    +      final String name = column.getColumnName();
    +      final OpenTSDBTypes type = column.getColumnType();
    +      TypeProtos.MinorType minorType = TYPES.get(type);
    +
    +      if (isMinorTypeNull(minorType)) {
    +        logExceptionMessage(name, type);
    --- End diff --
    
    Why don't we fail instead when we don't support a data type, so the user 
knows we cannot query the data, rather than silently skipping it?


> OpenTSDB storage plugin
> -----------------------
>
>                 Key: DRILL-5337
>                 URL: https://issues.apache.org/jira/browse/DRILL-5337
>             Project: Apache Drill
>          Issue Type: New Feature
>          Components: Storage - Other
>            Reporter: Dmitriy Gavrilovych
>            Assignee: Dmitriy Gavrilovych
>              Labels: features
>             Fix For: 1.12.0
>
>
> Storage plugin for OpenTSDB
> The plugin uses REST API to work with TSDB. 
> Expected queries are listed below:
> SELECT * FROM openTSDB.`warp.speed.test`;
> Return all elements from warp.speed.test table with default aggregator SUM
> SELECT * FROM openTSDB.`(metric=warp.speed.test)`;
> Return all elements from (metric=warp.speed.test) table, same as the previous 
> query, but with an alternative FROM syntax
> SELECT * FROM openTSDB.`(metric=warp.speed.test, aggregator=avg)`;
> Return all elements from warp.speed.test table, but with the custom aggregator
> SELECT `timestamp`, sum(`aggregated value`) FROM 
> openTSDB.`(metric=warp.speed.test, aggregator=avg)` GROUP BY `timestamp`;
> Return aggregated and grouped value by standard drill functions from 
> warp.speed.test table, but with the custom aggregator
> SELECT * FROM openTSDB.`(metric=warp.speed.test, downsample=5m-avg)`
> Return data limited by downsample



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

Reply via email to