[ 
https://issues.apache.org/jira/browse/FLINK-6968?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16405945#comment-16405945
 ] 

ASF GitHub Bot commented on FLINK-6968:
---------------------------------------

Github user liurenjie1024 commented on a diff in the pull request:

    https://github.com/apache/flink/pull/5688#discussion_r175680499
  
    --- Diff: 
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/QueryableTableSink.scala
 ---
    @@ -0,0 +1,162 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.flink.table.sinks
    +
    +import java.lang.{Boolean => JBool}
    +
    +import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
    +import org.apache.flink.api.common.typeinfo.TypeInformation
    +import org.apache.flink.api.java.functions.KeySelector
    +import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
    +import org.apache.flink.api.java.typeutils.{ResultTypeQueryable, 
RowTypeInfo}
    +import org.apache.flink.configuration.Configuration
    +import org.apache.flink.streaming.api.datastream.DataStream
    +import org.apache.flink.streaming.api.functions.ProcessFunction
    +import org.apache.flink.table.api.StreamQueryConfig
    +import 
org.apache.flink.table.runtime.aggregate.ProcessFunctionWithCleanupState
    +import org.apache.flink.types.Row
    +import org.apache.flink.util.Collector
    +import org.slf4j.LoggerFactory
    +
    +class QueryableTableSink(
    +    private val namePrefix: String,
    +    private val queryConfig: StreamQueryConfig)
    +  extends UpsertStreamTableSink[Row]
    +    with TableSinkBase[JTuple2[JBool, Row]] {
    +  private var keys: Array[String] = _
    +
    +  override def setKeyFields(keys: Array[String]): Unit = {
    +    if (keys == null) {
    +      throw new IllegalArgumentException("keys can't be null!")
    +    }
    +    this.keys = keys
    +  }
    +
    +  override def setIsAppendOnly(isAppendOnly: JBool): Unit = {
    +    if (isAppendOnly) {
    +      throw new IllegalArgumentException("A QueryableTableSink can not be 
used with append-only " +
    +        "tables as the table would grow infinitely")
    +    }
    +  }
    +
    +  override def getRecordType: TypeInformation[Row] = new 
RowTypeInfo(getFieldTypes, getFieldNames)
    +
    +  override def emitDataStream(dataStream: DataStream[JTuple2[JBool, 
Row]]): Unit = {
    +    val keyIndices = keys.map(getFieldNames.indexOf(_))
    +    val keyTypes = keyIndices.map(getFieldTypes(_))
    +
    +    val keySelectorType = new RowTypeInfo(keyTypes, keys)
    +
    +    val processFunction = new QueryableStateProcessFunction(
    +      namePrefix,
    +      queryConfig,
    +      keys,
    +      getFieldNames,
    +      getFieldTypes)
    +
    +    dataStream.keyBy(new RowKeySelector(keyIndices, keySelectorType))
    +      .process(processFunction)
    +  }
    +
    +  override protected def copy: TableSinkBase[JTuple2[JBool, Row]] = {
    +    new QueryableTableSink(this.namePrefix, this.queryConfig)
    +  }
    +}
    +
    +class RowKeySelector(
    +  private val keyIndices: Array[Int],
    +  @transient private val returnType: TypeInformation[Row])
    +  extends KeySelector[JTuple2[JBool, Row], Row]
    +    with ResultTypeQueryable[Row] {
    +
    +  override def getKey(value: JTuple2[JBool, Row]): Row = {
    +    val keys = keyIndices
    +
    +    val srcRow = value.f1
    +
    +    val destRow = new Row(keys.length)
    +    var i = 0
    +    while (i < keys.length) {
    +      destRow.setField(i, srcRow.getField(keys(i)))
    +      i += 1
    +    }
    +
    +    destRow
    +  }
    +
    +  override def getProducedType: TypeInformation[Row] = returnType
    +}
    +
    +class QueryableStateProcessFunction(
    +  private val namePrefix: String,
    +  private val queryConfig: StreamQueryConfig,
    +  private val keyNames: Array[String],
    +  private val fieldNames: Array[String],
    +  private val fieldTypes: Array[TypeInformation[_]])
    +  extends ProcessFunctionWithCleanupState[JTuple2[JBool, Row], 
Void](queryConfig) {
    +
    +  @transient private var states = Array[ValueState[AnyRef]]()
    +  @transient private var nonKeyIndices = Array[Int]()
    +
    +  override def open(parameters: Configuration): Unit = {
    +    super.open(parameters)
    +
    +    nonKeyIndices = fieldNames.indices
    +      .filter(idx => !keyNames.contains(fieldNames(idx)))
    +      .toArray
    +
    +    val statesBuilder = Array.newBuilder[ValueState[AnyRef]]
    +
    +    for (i <- nonKeyIndices) {
    +      val stateDesc = new ValueStateDescriptor(fieldNames(i), 
fieldTypes(i))
    +      
stateDesc.initializeSerializerUnlessSet(getRuntimeContext.getExecutionConfig)
    +      stateDesc.setQueryable(fieldNames(i))
    +      statesBuilder += 
getRuntimeContext.getState(stateDesc).asInstanceOf[ValueState[AnyRef]]
    +    }
    +
    +    states = statesBuilder.result()
    +
    +    initCleanupTimeState("QueryableStateCleanupTime")
    +  }
    +
    +  override def processElement(
    +    value: JTuple2[JBool, Row],
    +    ctx: ProcessFunction[JTuple2[JBool, Row], Void]#Context,
    +    out: Collector[Void]): Unit = {
    +    if (value.f0) {
    +      for (i <- nonKeyIndices.indices) {
    +        states(i).update(value.f1.getField(nonKeyIndices(i)))
    +      }
    +
    +      val currentTime = ctx.timerService().currentProcessingTime()
    --- End diff --
    
    I think it's right to be able to process both time domains, and I've added support for that.


> Store streaming, updating tables with unique key in queryable state
> -------------------------------------------------------------------
>
>                 Key: FLINK-6968
>                 URL: https://issues.apache.org/jira/browse/FLINK-6968
>             Project: Flink
>          Issue Type: New Feature
>          Components: Table API & SQL
>            Reporter: Fabian Hueske
>            Assignee: Renjie Liu
>            Priority: Major
>
> Streaming tables with unique key are continuously updated. For example 
> queries with a non-windowed aggregation generate such tables. Commonly, such 
> updating tables are emitted via an upsert table sink to an external datastore 
> (k-v store, database) to make it accessible to applications.
> This issue is about adding a feature to store and maintain such a table as 
> queryable state in Flink. By storing the table in Flink's queryable state, we 
> do not need an external data store to access the results of the query but can 
> query the results directly from Flink.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

Reply via email to