[1/2] flink git commit: [hotfix] [sql-client] Fix typo in SqlExecutionException

2018-07-18 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 4cd682ded -> 1e4cfc97b


[hotfix] [sql-client] Fix typo in SqlExecutionException

This closes #6364.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/71953b81
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/71953b81
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/71953b81

Branch: refs/heads/release-1.6
Commit: 71953b8160221ce22c87f030cfb54d72fba870a3
Parents: 4cd682d
Author: xueyu <278006...@qq.com>
Authored: Wed Jul 18 21:39:57 2018 +0800
Committer: Timo Walther 
Committed: Thu Jul 19 08:52:54 2018 +0200

--
 .../apache/flink/table/client/gateway/local/ExecutionContext.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/71953b81/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
--
diff --git a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
index 926bdb0..9152908 100644
--- a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
+++ b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
@@ -236,7 +236,7 @@ public class ExecutionContext {
 
 			TableFactoryService.find(BatchTableSinkFactory.class, sinkProperties, classLoader);
 			return factory.createBatchTableSink(sinkProperties);
 		}
-		throw new SqlExecutionException("Unsupported execution type for sources.");
+		throw new SqlExecutionException("Unsupported execution type for sinks.");
}
 
// 




flink git commit: [FLINK-9881] [table] Fix a typo in table.scala

2018-07-19 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 3d1f67bda -> c5b3d1731


[FLINK-9881] [table] Fix a typo in table.scala

This closes #6354.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/c5b3d173
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/c5b3d173
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/c5b3d173

Branch: refs/heads/master
Commit: c5b3d173122b67893f9abb300fdceddef06e2207
Parents: 3d1f67b
Author: Ashwin Sinha 
Authored: Wed Jul 18 02:19:20 2018 +0530
Committer: Timo Walther 
Committed: Thu Jul 19 09:00:48 2018 +0200

--
 .../main/scala/org/apache/flink/table/api/table.scala   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/c5b3d173/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
--
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
index 071cc69..a44bbaa 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
@@ -66,7 +66,7 @@ class Table(
 
   // Check if the plan has an unbounded TableFunctionCall as child node.
   //   A TableFunctionCall is tolerated as root node because the Table holds the initial call.
-  if (containsUnboudedUDTFCall(logicalPlan) &&
+  if (containsUnboundedUDTFCall(logicalPlan) &&
     !logicalPlan.isInstanceOf[LogicalTableFunctionCall]) {
     throw new ValidationException("TableFunction can only be used in join and leftOuterJoin.")
   }
@@ -87,7 +87,7 @@ class Table(
 
   def relBuilder: FlinkRelBuilder = tableEnv.getRelBuilder
 
-  def getRelNode: RelNode = if (containsUnboudedUDTFCall(logicalPlan)) {
+  def getRelNode: RelNode = if (containsUnboundedUDTFCall(logicalPlan)) {
     throw new ValidationException("Cannot translate a query with an unbounded table function call.")
   } else {
     logicalPlan.toRelNode(relBuilder)
@@ -504,7 +504,7 @@ class Table(
   private def join(right: Table, joinPredicate: Option[Expression], joinType: JoinType): Table = {
 
     // check if we join with a table or a table function
-    if (!containsUnboudedUDTFCall(right.logicalPlan)) {
+    if (!containsUnboundedUDTFCall(right.logicalPlan)) {
       // regular table-table join
 
       // check that the TableEnvironment of right table is not null
@@ -971,11 +971,11 @@ class Table(
     * @param n the node to check
     * @return true if the plan contains an unbounded UDTF call, false otherwise.
     */
-  private def containsUnboudedUDTFCall(n: LogicalNode): Boolean = {
+  private def containsUnboundedUDTFCall(n: LogicalNode): Boolean = {
     n match {
       case functionCall: LogicalTableFunctionCall if functionCall.child == null => true
-      case u: UnaryNode => containsUnboudedUDTFCall(u.child)
-      case b: BinaryNode => containsUnboudedUDTFCall(b.left) || containsUnboudedUDTFCall(b.right)
+      case u: UnaryNode => containsUnboundedUDTFCall(u.child)
+      case b: BinaryNode => containsUnboundedUDTFCall(b.left) || containsUnboundedUDTFCall(b.right)
       case _: LeafNode => false
     }
   }



flink git commit: [FLINK-9881] [table] Fix a typo in table.scala

2018-07-19 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 1e4cfc97b -> 4b7eff771


[FLINK-9881] [table] Fix a typo in table.scala

This closes #6354.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4b7eff77
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4b7eff77
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4b7eff77

Branch: refs/heads/release-1.6
Commit: 4b7eff7712ce009ef6641f94cf06848d83a2f2ec
Parents: 1e4cfc9
Author: Ashwin Sinha 
Authored: Wed Jul 18 02:19:20 2018 +0530
Committer: Timo Walther 
Committed: Thu Jul 19 09:04:34 2018 +0200

--
 .../main/scala/org/apache/flink/table/api/table.scala   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/4b7eff77/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
--
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
index 071cc69..a44bbaa 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
@@ -66,7 +66,7 @@ class Table(
 
   // Check if the plan has an unbounded TableFunctionCall as child node.
   //   A TableFunctionCall is tolerated as root node because the Table holds the initial call.
-  if (containsUnboudedUDTFCall(logicalPlan) &&
+  if (containsUnboundedUDTFCall(logicalPlan) &&
     !logicalPlan.isInstanceOf[LogicalTableFunctionCall]) {
     throw new ValidationException("TableFunction can only be used in join and leftOuterJoin.")
   }
@@ -87,7 +87,7 @@ class Table(
 
   def relBuilder: FlinkRelBuilder = tableEnv.getRelBuilder
 
-  def getRelNode: RelNode = if (containsUnboudedUDTFCall(logicalPlan)) {
+  def getRelNode: RelNode = if (containsUnboundedUDTFCall(logicalPlan)) {
     throw new ValidationException("Cannot translate a query with an unbounded table function call.")
   } else {
     logicalPlan.toRelNode(relBuilder)
@@ -504,7 +504,7 @@ class Table(
   private def join(right: Table, joinPredicate: Option[Expression], joinType: JoinType): Table = {
 
     // check if we join with a table or a table function
-    if (!containsUnboudedUDTFCall(right.logicalPlan)) {
+    if (!containsUnboundedUDTFCall(right.logicalPlan)) {
      // regular table-table join
 
      // check that the TableEnvironment of right table is not null
@@ -971,11 +971,11 @@ class Table(
     * @param n the node to check
     * @return true if the plan contains an unbounded UDTF call, false otherwise.
     */
-  private def containsUnboudedUDTFCall(n: LogicalNode): Boolean = {
+  private def containsUnboundedUDTFCall(n: LogicalNode): Boolean = {
     n match {
       case functionCall: LogicalTableFunctionCall if functionCall.child == null => true
-      case u: UnaryNode => containsUnboudedUDTFCall(u.child)
-      case b: BinaryNode => containsUnboudedUDTFCall(b.left) || containsUnboudedUDTFCall(b.right)
+      case u: UnaryNode => containsUnboundedUDTFCall(u.child)
+      case b: BinaryNode => containsUnboundedUDTFCall(b.left) || containsUnboundedUDTFCall(b.right)
       case _: LeafNode => false
     }
   }



flink git commit: [FLINK-9881] [table] Fix a typo in table.scala

2018-07-19 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.5 dfbc6bfd1 -> 2986421df


[FLINK-9881] [table] Fix a typo in table.scala

This closes #6354.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/2986421d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/2986421d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/2986421d

Branch: refs/heads/release-1.5
Commit: 2986421df66daf3b93fef0e9971331e7dfbac28b
Parents: dfbc6bf
Author: Ashwin Sinha 
Authored: Wed Jul 18 02:19:20 2018 +0530
Committer: Timo Walther 
Committed: Thu Jul 19 09:06:32 2018 +0200

--
 .../main/scala/org/apache/flink/table/api/table.scala   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/2986421d/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
--
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
index 071cc69..a44bbaa 100644
--- a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/table.scala
@@ -66,7 +66,7 @@ class Table(
 
   // Check if the plan has an unbounded TableFunctionCall as child node.
   //   A TableFunctionCall is tolerated as root node because the Table holds the initial call.
-  if (containsUnboudedUDTFCall(logicalPlan) &&
+  if (containsUnboundedUDTFCall(logicalPlan) &&
     !logicalPlan.isInstanceOf[LogicalTableFunctionCall]) {
     throw new ValidationException("TableFunction can only be used in join and leftOuterJoin.")
   }
@@ -87,7 +87,7 @@ class Table(
 
   def relBuilder: FlinkRelBuilder = tableEnv.getRelBuilder
 
-  def getRelNode: RelNode = if (containsUnboudedUDTFCall(logicalPlan)) {
+  def getRelNode: RelNode = if (containsUnboundedUDTFCall(logicalPlan)) {
     throw new ValidationException("Cannot translate a query with an unbounded table function call.")
   } else {
     logicalPlan.toRelNode(relBuilder)
@@ -504,7 +504,7 @@ class Table(
   private def join(right: Table, joinPredicate: Option[Expression], joinType: JoinType): Table = {
 
     // check if we join with a table or a table function
-    if (!containsUnboudedUDTFCall(right.logicalPlan)) {
+    if (!containsUnboundedUDTFCall(right.logicalPlan)) {
      // regular table-table join
 
      // check that the TableEnvironment of right table is not null
@@ -971,11 +971,11 @@ class Table(
     * @param n the node to check
     * @return true if the plan contains an unbounded UDTF call, false otherwise.
     */
-  private def containsUnboudedUDTFCall(n: LogicalNode): Boolean = {
+  private def containsUnboundedUDTFCall(n: LogicalNode): Boolean = {
     n match {
       case functionCall: LogicalTableFunctionCall if functionCall.child == null => true
-      case u: UnaryNode => containsUnboudedUDTFCall(u.child)
-      case b: BinaryNode => containsUnboudedUDTFCall(b.left) || containsUnboudedUDTFCall(b.right)
+      case u: UnaryNode => containsUnboundedUDTFCall(u.child)
+      case b: BinaryNode => containsUnboundedUDTFCall(b.left) || containsUnboundedUDTFCall(b.right)
       case _: LeafNode => false
     }
   }



[2/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
http://git-wip-us.apache.org/repos/asf/flink/blob/ddba1b69/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
--
diff --git a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
deleted file mode 100644
index b9dba77..000
--- a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.examples.java8.wordcount;
-
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.examples.java.wordcount.util.WordCountData;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.util.Collector;
-
-import java.util.Arrays;
-
-/**
- * Implements the streaming "WordCount" program that computes a simple word 
occurrences
- * over text files.
- *
- * The input is a plain text file with lines separated by newline 
characters.
- *
- * Usage: WordCount  
- * If no parameters are provided, the program is run with default data from 
{@link WordCountData}.
- *
- * This example shows how to:
- * 
- * write a compact Flink Streaming program with Java 8 Lambda Expressions.
- * 
- *
- */
-public class WordCount {
-
-   // 
*
-   // PROGRAM
-   // 
*
-
-   public static void main(String[] args) throws Exception {
-
-   if (!parseParameters(args)) {
-   return;
-   }
-
-   // set up the execution environment
-   final StreamExecutionEnvironment env = 
StreamExecutionEnvironment.getExecutionEnvironment();
-
-   // get input data
-   DataStream text = getTextDataStream(env);
-
-   DataStream> counts =
-   // normalize and split each line
-   text.map(line -> 
line.toLowerCase().split("\\W+"))
-   // convert split line in pairs (2-tuples) 
containing: (word,1)
-   .flatMap((String[] tokens, 
Collector> out) -> {
-   // emit the pairs with non-zero-length 
words
-   Arrays.stream(tokens)
-   .filter(t -> t.length() > 0)
-   .forEach(t -> out.collect(new 
Tuple2<>(t, 1)));
-   })
-   // group by the tuple field "0" and sum up 
tuple field "1"
-   .keyBy(0)
-   .sum(1);
-
-   // emit result
-   if (fileOutput) {
-   counts.writeAsCsv(outputPath);
-   } else {
-   counts.print();
-   }
-
-   // execute program
-   env.execute("Streaming WordCount Example");
-   }
-
-   // 
*
-   // UTIL METHODS
-   // 
*
-
-   private static boolean fileOutput = false;
-   private static String textPath;
-   private static String outputPath;
-
-   private static boolean parseParameters(String[] args) {
-
-   if (args.length > 0) {
-   // parse input arguments
-   fileOutput = true;
-   if (args.length == 2) {
-   textPath = args[0];
-   outputPath = args[1];
-   } else {
-   System.err.println("Usage: WordCount  ");
-   return false;
-   

[3/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
[FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

This commit removes the flink-java8 module and merges some of its tests into flink-core/flink-runtime. It ensures that explicit type information can still be passed in the DataStream API as a fallback. Since the Tycho compiler approach was very hacky and no longer seems to work, this commit also removes all references to it from the docs and quickstarts.

This closes #6120.
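
For illustration (not part of the commit): the explicit-type-information fallback mentioned above is the .returns(...) hint on the DataStream API. A minimal Java sketch, assuming a plain Flink 1.6 setup; the input data, class name, and job name are made up:

	import org.apache.flink.api.common.typeinfo.Types;
	import org.apache.flink.api.java.tuple.Tuple2;
	import org.apache.flink.streaming.api.datastream.DataStream;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.util.Collector;

	public class LambdaTypeHintExample {

		public static void main(String[] args) throws Exception {
			final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

			DataStream<Tuple2<String, Integer>> counts = env
				.fromElements("to be or not to be")
				// the Collector's generic argument is erased at runtime, so extraction can fail for this lambda
				.flatMap((String line, Collector<Tuple2<String, Integer>> out) -> {
					for (String token : line.toLowerCase().split("\\W+")) {
						out.collect(new Tuple2<>(token, 1));
					}
				})
				// explicit type information as a fallback when lambda type extraction cannot infer it
				.returns(Types.TUPLE(Types.STRING, Types.INT));

			counts.print();
			env.execute("Lambda type hint example");
		}
	}

Without the .returns(...) call, Flink typically rejects such a lambda with an exception asking for explicit type information.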


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ddba1b69
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ddba1b69
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ddba1b69

Branch: refs/heads/master
Commit: ddba1b69f43cbb885e178dfaafa120f1fe196a13
Parents: 95eadfe
Author: Timo Walther 
Authored: Mon Jun 4 12:49:43 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 08:06:07 2018 +0200

--
 docs/dev/java8.md   | 198 --
 docs/dev/java_lambdas.md| 138 +++
 .../api/java/typeutils/TypeExtractionUtils.java |  23 ++
 .../flink/api/java/typeutils/TypeExtractor.java | 290 ++
 .../java/typeutils/LambdaExtractionTest.java| 340 
 .../examples/java/relational/TPCHQuery10.java   |  28 +-
 .../examples/java/wordcount/WordCount.java  |   3 +-
 .../streaming/examples/wordcount/WordCount.java |  14 +-
 flink-java8/pom.xml | 225 ---
 .../examples/java8/relational/TPCHQuery10.java  | 212 --
 .../examples/java8/wordcount/WordCount.java | 124 --
 .../examples/java8/wordcount/WordCount.java | 124 --
 .../java/type/lambdas/LambdaExtractionTest.java | 383 ---
 .../org/apache/flink/cep/CEPLambdaTest.java | 104 -
 .../runtime/util/JarFileCreatorLambdaTest.java  | 113 --
 .../util/jartestprogram/FilterLambda1.java  |  41 --
 .../util/jartestprogram/FilterLambda2.java  |  39 --
 .../util/jartestprogram/FilterLambda3.java  |  39 --
 .../util/jartestprogram/FilterLambda4.java  |  38 --
 .../util/jartestprogram/UtilFunction.java   |  30 --
 .../jartestprogram/UtilFunctionWrapper.java |  35 --
 .../runtime/util/jartestprogram/WordFilter.java |  28 --
 .../operators/lambdas/AllGroupReduceITCase.java |  59 ---
 .../java/operators/lambdas/CoGroupITCase.java   |  74 
 .../api/java/operators/lambdas/CrossITCase.java |  73 
 .../java/operators/lambdas/FilterITCase.java|  91 -
 .../java/operators/lambdas/FlatJoinITCase.java  |  68 
 .../java/operators/lambdas/FlatMapITCase.java   |  56 ---
 .../operators/lambdas/GroupReduceITCase.java|  69 
 .../api/java/operators/lambdas/JoinITCase.java  |  69 
 .../api/java/operators/lambdas/MapITCase.java   |  74 
 .../java/operators/lambdas/ReduceITCase.java| 109 --
 .../src/test/resources/log4j-test.properties|  19 -
 .../org/apache/flink/cep/PatternStream.java |  16 +-
 .../java/org/apache/flink/cep/CEPITCase.java|  35 +-
 .../flink/graph/asm/translate/Translate.java|   4 -
 .../main/resources/archetype-resources/pom.xml  |  19 -
 .../flink/runtime/util/JarFileCreatorTest.java  |  91 -
 .../jartestprogram/FilterWithIndirection.java   |  38 ++
 .../util/jartestprogram/FilterWithLambda.java   |  40 ++
 .../FilterWithMethodReference.java  |  41 ++
 .../util/jartestprogram/UtilFunction.java   |  32 ++
 .../jartestprogram/UtilFunctionWrapper.java |  37 ++
 .../runtime/util/jartestprogram/WordFilter.java |  29 ++
 .../api/datastream/AllWindowedStream.java   |   2 -
 .../api/datastream/AsyncDataStream.java |   1 -
 .../datastream/BroadcastConnectedStream.java|   4 -
 .../api/datastream/CoGroupedStreams.java|  40 +-
 .../api/datastream/ConnectedStreams.java|  28 +-
 .../streaming/api/datastream/DataStream.java|  16 +-
 .../api/datastream/IterativeStream.java |   4 +
 .../streaming/api/datastream/JoinedStreams.java |  43 ++-
 .../streaming/api/datastream/KeyedStream.java   |  65 ++--
 .../api/datastream/WindowedStream.java  |   2 -
 .../flink/streaming/api/TypeFillTest.java   |  65 +++-
 .../flink/test/operators/CoGroupITCase.java |  33 ++
 .../apache/flink/test/operators/MapITCase.java  |  34 ++
 .../flink/test/operators/ReduceITCase.java  |   6 +-
 pom.xml |   1 -
 59 files changed, 1223 insertions(+), 2833 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ddba1b69/docs/dev/java8.md
--
diff --git a/docs/dev/java8.md b/docs/dev/java8.md
deleted file mode 100644
index 8e7e643..000
--- a/docs/dev/java8.md
+++ /dev/null
@@ -1,198 +0,0 @@

-title: "Java 

[1/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 95eadfe15 -> ddba1b69f


http://git-wip-us.apache.org/repos/asf/flink/blob/ddba1b69/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
--
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
index 8f8016e..f5d07de 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +16,11 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.runtime.util;
 
+import org.apache.flink.runtime.util.jartestprogram.FilterWithIndirection;
+import org.apache.flink.runtime.util.jartestprogram.FilterWithLambda;
+import org.apache.flink.runtime.util.jartestprogram.FilterWithMethodReference;
 import 
org.apache.flink.runtime.util.jartestprogram.WordCountWithAnonymousClass;
 import org.apache.flink.runtime.util.jartestprogram.WordCountWithExternalClass;
 import 
org.apache.flink.runtime.util.jartestprogram.WordCountWithExternalClass2;
@@ -28,6 +30,7 @@ import 
org.apache.flink.runtime.util.jartestprogram.AnonymousInNonStaticMethod;
 import 
org.apache.flink.runtime.util.jartestprogram.AnonymousInNonStaticMethod2;
 import org.apache.flink.runtime.util.jartestprogram.NestedAnonymousInnerClass;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 import java.io.File;
 import java.io.FileInputStream;
@@ -37,7 +40,6 @@ import java.util.Set;
 import java.util.jar.JarInputStream;
 import java.util.zip.ZipEntry;
 
-
 public class JarFileCreatorTest {
 
//anonymous inner class in static method accessing a local variable in 
its closure.
@@ -48,14 +50,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInStaticMethod.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in non static method accessing a local variable 
in its closure.
@@ -66,14 +68,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInNonStaticMethod.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in non static method accessing a field of its 
enclosing class.
@@ -84,14 +86,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInNonStaticMethod2.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in an anonymous inner class accessing a field 
of the outermost enclosing class.
@@ -102,7 +104,7 @@ public class JarFileCreatorTest {
jfc.addClass(NestedAnonymousInnerClass.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add(

[2/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
http://git-wip-us.apache.org/repos/asf/flink/blob/4862101d/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
--
diff --git a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java b/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
deleted file mode 100644
index b9dba77..000
--- a/flink-java8/src/main/java/org/apache/flink/streaming/examples/java8/wordcount/WordCount.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.examples.java8.wordcount;
-
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.examples.java.wordcount.util.WordCountData;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.util.Collector;
-
-import java.util.Arrays;
-
-/**
- * Implements the streaming "WordCount" program that computes a simple word 
occurrences
- * over text files.
- *
- * The input is a plain text file with lines separated by newline 
characters.
- *
- * Usage: WordCount  
- * If no parameters are provided, the program is run with default data from 
{@link WordCountData}.
- *
- * This example shows how to:
- * 
- * write a compact Flink Streaming program with Java 8 Lambda Expressions.
- * 
- *
- */
-public class WordCount {
-
-   // 
*
-   // PROGRAM
-   // 
*
-
-   public static void main(String[] args) throws Exception {
-
-   if (!parseParameters(args)) {
-   return;
-   }
-
-   // set up the execution environment
-   final StreamExecutionEnvironment env = 
StreamExecutionEnvironment.getExecutionEnvironment();
-
-   // get input data
-   DataStream text = getTextDataStream(env);
-
-   DataStream> counts =
-   // normalize and split each line
-   text.map(line -> 
line.toLowerCase().split("\\W+"))
-   // convert split line in pairs (2-tuples) 
containing: (word,1)
-   .flatMap((String[] tokens, 
Collector> out) -> {
-   // emit the pairs with non-zero-length 
words
-   Arrays.stream(tokens)
-   .filter(t -> t.length() > 0)
-   .forEach(t -> out.collect(new 
Tuple2<>(t, 1)));
-   })
-   // group by the tuple field "0" and sum up 
tuple field "1"
-   .keyBy(0)
-   .sum(1);
-
-   // emit result
-   if (fileOutput) {
-   counts.writeAsCsv(outputPath);
-   } else {
-   counts.print();
-   }
-
-   // execute program
-   env.execute("Streaming WordCount Example");
-   }
-
-   // 
*
-   // UTIL METHODS
-   // 
*
-
-   private static boolean fileOutput = false;
-   private static String textPath;
-   private static String outputPath;
-
-   private static boolean parseParameters(String[] args) {
-
-   if (args.length > 0) {
-   // parse input arguments
-   fileOutput = true;
-   if (args.length == 2) {
-   textPath = args[0];
-   outputPath = args[1];
-   } else {
-   System.err.println("Usage: WordCount  ");
-   return false;
-   

[3/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
[FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

This commit removes the flink-java8 module and merges some of its tests into flink-core/flink-runtime. It ensures that explicit type information can still be passed in the DataStream API as a fallback. Since the Tycho compiler approach was very hacky and no longer seems to work, this commit also removes all references to it from the docs and quickstarts.

This closes #6120.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/4862101d
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/4862101d
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/4862101d

Branch: refs/heads/release-1.6
Commit: 4862101ddcc5ede17fcd0a304583e91e2f266044
Parents: 402745e
Author: Timo Walther 
Authored: Mon Jun 4 12:49:43 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 08:09:15 2018 +0200

--
 docs/dev/java8.md   | 198 --
 docs/dev/java_lambdas.md| 138 +++
 .../api/java/typeutils/TypeExtractionUtils.java |  23 ++
 .../flink/api/java/typeutils/TypeExtractor.java | 290 ++
 .../java/typeutils/LambdaExtractionTest.java| 340 
 .../examples/java/relational/TPCHQuery10.java   |  28 +-
 .../examples/java/wordcount/WordCount.java  |   3 +-
 .../streaming/examples/wordcount/WordCount.java |  14 +-
 flink-java8/pom.xml | 225 ---
 .../examples/java8/relational/TPCHQuery10.java  | 212 --
 .../examples/java8/wordcount/WordCount.java | 124 --
 .../examples/java8/wordcount/WordCount.java | 124 --
 .../java/type/lambdas/LambdaExtractionTest.java | 383 ---
 .../org/apache/flink/cep/CEPLambdaTest.java | 104 -
 .../runtime/util/JarFileCreatorLambdaTest.java  | 113 --
 .../util/jartestprogram/FilterLambda1.java  |  41 --
 .../util/jartestprogram/FilterLambda2.java  |  39 --
 .../util/jartestprogram/FilterLambda3.java  |  39 --
 .../util/jartestprogram/FilterLambda4.java  |  38 --
 .../util/jartestprogram/UtilFunction.java   |  30 --
 .../jartestprogram/UtilFunctionWrapper.java |  35 --
 .../runtime/util/jartestprogram/WordFilter.java |  28 --
 .../operators/lambdas/AllGroupReduceITCase.java |  59 ---
 .../java/operators/lambdas/CoGroupITCase.java   |  74 
 .../api/java/operators/lambdas/CrossITCase.java |  73 
 .../java/operators/lambdas/FilterITCase.java|  91 -
 .../java/operators/lambdas/FlatJoinITCase.java  |  68 
 .../java/operators/lambdas/FlatMapITCase.java   |  56 ---
 .../operators/lambdas/GroupReduceITCase.java|  69 
 .../api/java/operators/lambdas/JoinITCase.java  |  69 
 .../api/java/operators/lambdas/MapITCase.java   |  74 
 .../java/operators/lambdas/ReduceITCase.java| 109 --
 .../src/test/resources/log4j-test.properties|  19 -
 .../org/apache/flink/cep/PatternStream.java |  16 +-
 .../java/org/apache/flink/cep/CEPITCase.java|  35 +-
 .../flink/graph/asm/translate/Translate.java|   4 -
 .../main/resources/archetype-resources/pom.xml  |  19 -
 .../flink/runtime/util/JarFileCreatorTest.java  |  91 -
 .../jartestprogram/FilterWithIndirection.java   |  38 ++
 .../util/jartestprogram/FilterWithLambda.java   |  40 ++
 .../FilterWithMethodReference.java  |  41 ++
 .../util/jartestprogram/UtilFunction.java   |  32 ++
 .../jartestprogram/UtilFunctionWrapper.java |  37 ++
 .../runtime/util/jartestprogram/WordFilter.java |  29 ++
 .../api/datastream/AllWindowedStream.java   |   2 -
 .../api/datastream/AsyncDataStream.java |   1 -
 .../datastream/BroadcastConnectedStream.java|   4 -
 .../api/datastream/CoGroupedStreams.java|  40 +-
 .../api/datastream/ConnectedStreams.java|  28 +-
 .../streaming/api/datastream/DataStream.java|  16 +-
 .../api/datastream/IterativeStream.java |   4 +
 .../streaming/api/datastream/JoinedStreams.java |  43 ++-
 .../streaming/api/datastream/KeyedStream.java   |  65 ++--
 .../api/datastream/WindowedStream.java  |   2 -
 .../flink/streaming/api/TypeFillTest.java   |  65 +++-
 .../flink/test/operators/CoGroupITCase.java |  33 ++
 .../apache/flink/test/operators/MapITCase.java  |  34 ++
 .../flink/test/operators/ReduceITCase.java  |   6 +-
 pom.xml |   1 -
 59 files changed, 1223 insertions(+), 2833 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/4862101d/docs/dev/java8.md
--
diff --git a/docs/dev/java8.md b/docs/dev/java8.md
deleted file mode 100644
index 8e7e643..000
--- a/docs/dev/java8.md
+++ /dev/null
@@ -1,198 +0,0 @@

-title: "

[1/3] flink git commit: [FLINK-7251] [types] Remove the flink-java8 module and improve lambda type extraction

2018-07-19 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 402745eba -> 4862101dd


http://git-wip-us.apache.org/repos/asf/flink/blob/4862101d/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
--
diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java b/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
index 8f8016e..f5d07de 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/util/JarFileCreatorTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +16,11 @@
  * limitations under the License.
  */
 
-
 package org.apache.flink.runtime.util;
 
+import org.apache.flink.runtime.util.jartestprogram.FilterWithIndirection;
+import org.apache.flink.runtime.util.jartestprogram.FilterWithLambda;
+import org.apache.flink.runtime.util.jartestprogram.FilterWithMethodReference;
 import 
org.apache.flink.runtime.util.jartestprogram.WordCountWithAnonymousClass;
 import org.apache.flink.runtime.util.jartestprogram.WordCountWithExternalClass;
 import 
org.apache.flink.runtime.util.jartestprogram.WordCountWithExternalClass2;
@@ -28,6 +30,7 @@ import 
org.apache.flink.runtime.util.jartestprogram.AnonymousInNonStaticMethod;
 import 
org.apache.flink.runtime.util.jartestprogram.AnonymousInNonStaticMethod2;
 import org.apache.flink.runtime.util.jartestprogram.NestedAnonymousInnerClass;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 import java.io.File;
 import java.io.FileInputStream;
@@ -37,7 +40,6 @@ import java.util.Set;
 import java.util.jar.JarInputStream;
 import java.util.zip.ZipEntry;
 
-
 public class JarFileCreatorTest {
 
//anonymous inner class in static method accessing a local variable in 
its closure.
@@ -48,14 +50,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInStaticMethod.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInStaticMethod.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in non static method accessing a local variable 
in its closure.
@@ -66,14 +68,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInNonStaticMethod.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in non static method accessing a field of its 
enclosing class.
@@ -84,14 +86,14 @@ public class JarFileCreatorTest {
jfc.addClass(AnonymousInNonStaticMethod2.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2$1.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2$A.class");

ans.add("org/apache/flink/runtime/util/jartestprogram/AnonymousInNonStaticMethod2.class");
 
Assert.assertTrue("Jar file for Anonymous Inner Class is not 
correct", validate(ans, out));
 
-   out.delete();
+   Assert.assertTrue(out.delete());
}
 
//anonymous inner class in an anonymous inner class accessing a field 
of the outermost enclosing class.
@@ -102,7 +104,7 @@ public class JarFileCreatorTest {
jfc.addClass(NestedAnonymousInnerClass.class)
.createJarFile();
 
-   Set ans = new HashSet();
+   Set ans = new HashSet<>();

ans

[2/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
http://git-wip-us.apache.org/repos/asf/flink/blob/6fcc1e9a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
--
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
new file mode 100644
index 000..ec57c5e
--- /dev/null
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.catalog
+
+import org.apache.flink.table.api._
+import org.apache.flink.table.factories._
+import org.apache.flink.table.plan.schema._
+import org.apache.flink.table.plan.stats.FlinkStatistic
+import org.apache.flink.table.sinks.{BatchTableSink, StreamTableSink}
+import org.apache.flink.table.sources.{BatchTableSource, StreamTableSource}
+import org.apache.flink.table.util.Logging
+
+
+/**
+  * The utility class is used to convert [[ExternalCatalogTable]] to 
[[TableSourceSinkTable]].
+  *
+  * It uses [[TableFactoryService]] for discovering.
+  */
+object ExternalTableUtil extends Logging {
+
+  /**
+* Converts an [[ExternalCatalogTable]] instance to a [[TableSourceTable]] 
instance
+*
+* @param externalTable the [[ExternalCatalogTable]] instance which to 
convert
+* @return converted [[TableSourceTable]] instance from the input catalog 
table
+*/
+  def fromExternalCatalogTable[T1, T2](
+  tableEnv: TableEnvironment,
+  externalTable: ExternalCatalogTable)
+: TableSourceSinkTable[T1, T2] = {
+
+val statistics = new FlinkStatistic(externalTable.getTableStats)
+
+val source: Option[TableSourceTable[T1]] = if 
(externalTable.isTableSource) {
+  Some(createTableSource(tableEnv, externalTable, statistics))
+} else {
+  None
+}
+
+val sink: Option[TableSinkTable[T2]] = if (externalTable.isTableSink) {
+  Some(createTableSink(tableEnv, externalTable, statistics))
+} else {
+  None
+}
+
+new TableSourceSinkTable[T1, T2](source, sink)
+  }
+
+  private def createTableSource[T](
+  tableEnv: TableEnvironment,
+  externalTable: ExternalCatalogTable,
+  statistics: FlinkStatistic)
+: TableSourceTable[T] = tableEnv match {
+
+case _: BatchTableEnvironment if externalTable.isBatchTable =>
+  val source = TableFactoryUtil.findAndCreateTableSource(tableEnv, 
externalTable)
+  new BatchTableSourceTable[T](source.asInstanceOf[BatchTableSource[T]], 
statistics)
+
+case _: StreamTableEnvironment if externalTable.isStreamTable =>
+  val source = TableFactoryUtil.findAndCreateTableSource(tableEnv, 
externalTable)
+  new StreamTableSourceTable[T](source.asInstanceOf[StreamTableSource[T]], 
statistics)
+
+case _ =>
+  throw new ValidationException(
+"External catalog table does not support the current environment for a 
table source.")
+  }
+
+  private def createTableSink[T](
+  tableEnv: TableEnvironment,
+  externalTable: ExternalCatalogTable,
+  statistics: FlinkStatistic)
+: TableSinkTable[T] = tableEnv match {
+
+case _: BatchTableEnvironment if externalTable.isBatchTable =>
+  val sink = TableFactoryUtil.findAndCreateTableSink(tableEnv, 
externalTable)
+  new TableSinkTable[T](sink.asInstanceOf[BatchTableSink[T]], statistics)
+
+case _: StreamTableEnvironment if externalTable.isStreamTable =>
+  val sink = TableFactoryUtil.findAndCreateTableSink(tableEnv, 
externalTable)
+  new TableSinkTable[T](sink.asInstanceOf[StreamTableSink[T]], statistics)
+
+case _ =>
+  throw new ValidationException(
+"External catalog table does not support the current environment for a 
table sink.")
+  }
+}

http://git-wip-us.apache.org/repos/asf/flink/blob/6fcc1e9a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/BatchTableDescriptor.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apach

[3/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
[FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

This commit exposes the new unified sink creation through the table environments and the external catalog table. It introduces a new update-mode property to distinguish between append, retract, and upsert table sources and sinks. It also refactors the top-level API classes one last time and adds more documentation. This completes the unified table sources/sinks story from an API point of view.

Brief change log:
- Introduction of TableEnvironment.connect() and corresponding API builder classes
- Introduction of the update-mode property and updates to existing connectors
- External catalog support with proper source/sink discovery and API

This closes #6343.
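
For illustration (not taken from the commit): a minimal Java sketch of how the connect() chain and the update-mode property read, assuming the FileSystem and Csv descriptors are available; the path, field, and table name are made up:

	import org.apache.flink.api.common.typeinfo.Types;
	import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
	import org.apache.flink.table.api.TableEnvironment;
	import org.apache.flink.table.api.java.StreamTableEnvironment;
	import org.apache.flink.table.descriptors.Csv;
	import org.apache.flink.table.descriptors.FileSystem;
	import org.apache.flink.table.descriptors.Schema;

	public class ConnectDescriptorExample {

		public static void main(String[] args) throws Exception {
			final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
			final StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

			// connect() starts the descriptor chain; inAppendMode() sets the new update-mode property
			tableEnv
				.connect(new FileSystem().path("/tmp/words.csv"))     // illustrative connector and path
				.withFormat(new Csv().field("word", Types.STRING))    // format schema
				.withSchema(new Schema().field("word", Types.STRING)) // table schema
				.inAppendMode()                                       // update-mode: append
				.registerTableSource("Words");
		}
	}

The other two update modes introduced here are set via inRetractMode() and inUpsertMode(), as the descriptor test changes below show.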


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6fcc1e9a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6fcc1e9a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6fcc1e9a

Branch: refs/heads/master
Commit: 6fcc1e9a868ff3ce2bd0376b6b3493a08c7b604c
Parents: ddba1b6
Author: Timo Walther 
Authored: Fri Jul 20 08:47:53 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 09:29:55 2018 +0200

--
 .../kafka/Kafka010AvroTableSource.java  |   6 +-
 .../kafka/Kafka010JsonTableSource.java  |   6 +-
 .../kafka/Kafka011AvroTableSource.java  |   6 +-
 .../kafka/Kafka011JsonTableSource.java  |   6 +-
 .../kafka/Kafka08AvroTableSource.java   |   6 +-
 .../kafka/Kafka08JsonTableSource.java   |   6 +-
 .../kafka/Kafka09AvroTableSource.java   |   6 +-
 .../kafka/Kafka09JsonTableSource.java   |   6 +-
 .../connectors/kafka/KafkaAvroTableSource.java  |   6 +-
 .../connectors/kafka/KafkaJsonTableSource.java  |   4 +-
 .../connectors/kafka/KafkaTableSource.java  |   4 +-
 .../kafka/KafkaTableSourceFactory.java  |   3 +
 .../KafkaJsonTableSourceFactoryTestBase.java|  11 +-
 .../kafka/KafkaTableSourceFactoryTestBase.java  |  11 +-
 .../apache/flink/table/client/config/Sink.java  |   4 +-
 .../flink/table/client/config/Source.java   |   4 +-
 .../flink/table/client/config/SourceSink.java   |   2 +-
 .../client/gateway/local/EnvironmentTest.java   |  14 +-
 .../gateway/local/ExecutionContextTest.java |   6 +-
 .../gateway/local/LocalExecutorITCase.java  |  12 +-
 .../gateway/utils/TestTableSinkFactory.java |   3 +
 .../gateway/utils/TestTableSourceFactory.java   |   3 +
 .../resources/test-sql-client-defaults.yaml |   3 +
 .../test/resources/test-sql-client-factory.yaml |   1 +
 ...rg.apache.flink.table.factories.TableFactory |   6 +-
 .../flink/table/api/BatchTableEnvironment.scala |  36 +-
 .../table/api/StreamTableEnvironment.scala  |  26 +-
 .../flink/table/api/TableEnvironment.scala  |  32 +-
 .../table/catalog/ExternalCatalogSchema.scala   |   2 +-
 .../table/catalog/ExternalCatalogTable.scala| 335 +--
 .../table/catalog/ExternalTableSourceUtil.scala |  70 
 .../flink/table/catalog/ExternalTableUtil.scala | 102 ++
 .../descriptors/BatchTableDescriptor.scala  |  31 ++
 .../BatchTableSourceDescriptor.scala|  87 -
 .../descriptors/ConnectTableDescriptor.scala| 108 ++
 .../flink/table/descriptors/Descriptor.scala|   9 +-
 .../descriptors/DescriptorProperties.scala  |  10 +
 .../descriptors/RegistrableDescriptor.scala |  49 +++
 .../table/descriptors/SchematicDescriptor.scala |  35 ++
 .../descriptors/StreamTableDescriptor.scala | 101 ++
 .../StreamTableDescriptorValidator.scala|  48 +++
 .../StreamTableSourceDescriptor.scala   |  90 -
 .../descriptors/StreamableDescriptor.scala  |  67 
 .../table/descriptors/TableDescriptor.scala |  20 +-
 .../descriptors/TableDescriptorValidator.scala  |  29 --
 .../table/descriptors/TableSinkDescriptor.scala |  32 --
 .../descriptors/TableSourceDescriptor.scala |  57 
 .../flink/table/factories/TableFactory.scala|   6 +
 .../table/factories/TableFactoryService.scala   |   8 +-
 .../table/factories/TableFactoryUtil.scala  |  82 +
 .../table/factories/TableFormatFactory.scala|   2 +
 .../table/sinks/CsvAppendTableSinkFactory.scala |  45 +++
 .../table/sinks/CsvBatchTableSinkFactory.scala  |  38 +++
 .../flink/table/sinks/CsvTableSinkFactory.scala | 112 ---
 .../table/sinks/CsvTableSinkFactoryBase.scala   |  96 ++
 .../sources/CsvAppendTableSourceFactory.scala   |  45 +++
 .../sources/CsvBatchTableSourceFactory.scala|  38 +++
 .../table/sources/CsvTableSourceFactory.scala   | 139 
 .../sources/CsvTableSourceFactoryBase.scala | 123 +++
 .../flink/table/api/ExternalCatalogTest.scala   |  20 +-
 .../catalog/ExternalCatalogSchemaTest.scala |   2 +-
 .../catalog/InMemoryExternalCatalogTest.scala   |   8 +-
 .../table/des

[1/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master ddba1b69f -> 6fcc1e9a8


http://git-wip-us.apache.org/repos/asf/flink/blob/6fcc1e9a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
--
diff --git a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
index 7a98b0b..3f6426d 100644
--- a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
+++ b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
@@ -18,6 +18,7 @@
 
 package org.apache.flink.table.descriptors
 
+import 
org.apache.flink.table.descriptors.StreamTableDescriptorValidator.{UPDATE_MODE, 
UPDATE_MODE_VALUE_APPEND, UPDATE_MODE_VALUE_RETRACT, UPDATE_MODE_VALUE_UPSERT}
 import org.apache.flink.util.Preconditions
 import org.junit.Assert.assertEquals
 import org.junit.Test
@@ -84,18 +85,44 @@ abstract class DescriptorTestBase {
   }
 }
 
-class TestTableSourceDescriptor(connector: ConnectorDescriptor)
-  extends TableSourceDescriptor {
+class TestTableDescriptor(connector: ConnectorDescriptor)
+  extends TableDescriptor
+  with SchematicDescriptor[TestTableDescriptor]
+  with StreamableDescriptor[TestTableDescriptor] {
 
-  this.connectorDescriptor = Some(connector)
+  private var formatDescriptor: Option[FormatDescriptor] = None
+  private var schemaDescriptor: Option[Schema] = None
+  private var updateMode: Option[String] = None
 
-  def addFormat(format: FormatDescriptor): TestTableSourceDescriptor = {
+  override private[flink] def addProperties(properties: DescriptorProperties): 
Unit = {
+connector.addProperties(properties)
+formatDescriptor.foreach(_.addProperties(properties))
+schemaDescriptor.foreach(_.addProperties(properties))
+updateMode.foreach(mode => properties.putString(UPDATE_MODE, mode))
+  }
+
+  override def withFormat(format: FormatDescriptor): TestTableDescriptor = {
 this.formatDescriptor = Some(format)
 this
   }
 
-  def addSchema(schema: Schema): TestTableSourceDescriptor = {
+  override def withSchema(schema: Schema): TestTableDescriptor = {
 this.schemaDescriptor = Some(schema)
 this
   }
+
+  override def inAppendMode(): TestTableDescriptor = {
+updateMode = Some(UPDATE_MODE_VALUE_APPEND)
+this
+  }
+
+  override def inRetractMode(): TestTableDescriptor = {
+updateMode = Some(UPDATE_MODE_VALUE_RETRACT)
+this
+  }
+
+  override def inUpsertMode(): TestTableDescriptor = {
+updateMode = Some(UPDATE_MODE_VALUE_UPSERT)
+this
+  }
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/6fcc1e9a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
--
diff --git a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
new file mode 100644
index 000..ccac317
--- /dev/null
+++ b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.descriptors
+
+import org.apache.flink.table.api.Types
+import org.apache.flink.table.utils.TableTestBase
+import org.junit.Assert.assertEquals
+import org.junit.Test
+
+import scala.collection.JavaConverters._
+
+/**
+  * Tests for [[TableDescriptor]].
+  */
+class TableDescriptorTest extends TableTestBase {
+
+  @Test
+  def testStreamTableSourceDescriptor(): Unit = {
+testTableSourceDescriptor(true)
+  }
+
+  @Test
+  def testBatchTableSourceDescriptor(): Unit = {
+testTableSourceDescriptor(false)
+  }
+
+  private def testTableSourceDescriptor(isStreaming: Boolean): Unit = {
+
+val schema = Schema()
+  .field("myfield", Types.STRING)
+  .field("myfield2", Types.

[2/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
http://git-wip-us.apache.org/repos/asf/flink/blob/7bb07e4e/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
--
diff --git a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
new file mode 100644
index 000..ec57c5e
--- /dev/null
+++ b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalTableUtil.scala
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.catalog
+
+import org.apache.flink.table.api._
+import org.apache.flink.table.factories._
+import org.apache.flink.table.plan.schema._
+import org.apache.flink.table.plan.stats.FlinkStatistic
+import org.apache.flink.table.sinks.{BatchTableSink, StreamTableSink}
+import org.apache.flink.table.sources.{BatchTableSource, StreamTableSource}
+import org.apache.flink.table.util.Logging
+
+
+/**
+  * The utility class is used to convert [[ExternalCatalogTable]] to [[TableSourceSinkTable]].
+  *
+  * It uses [[TableFactoryService]] for discovering.
+  */
+object ExternalTableUtil extends Logging {
+
+  /**
+    * Converts an [[ExternalCatalogTable]] instance to a [[TableSourceTable]] instance
+    *
+    * @param externalTable the [[ExternalCatalogTable]] instance which to convert
+    * @return converted [[TableSourceTable]] instance from the input catalog table
+    */
+  def fromExternalCatalogTable[T1, T2](
+      tableEnv: TableEnvironment,
+      externalTable: ExternalCatalogTable)
+    : TableSourceSinkTable[T1, T2] = {
+
+    val statistics = new FlinkStatistic(externalTable.getTableStats)
+
+    val source: Option[TableSourceTable[T1]] = if (externalTable.isTableSource) {
+      Some(createTableSource(tableEnv, externalTable, statistics))
+    } else {
+      None
+    }
+
+    val sink: Option[TableSinkTable[T2]] = if (externalTable.isTableSink) {
+      Some(createTableSink(tableEnv, externalTable, statistics))
+    } else {
+      None
+    }
+
+    new TableSourceSinkTable[T1, T2](source, sink)
+  }
+
+  private def createTableSource[T](
+      tableEnv: TableEnvironment,
+      externalTable: ExternalCatalogTable,
+      statistics: FlinkStatistic)
+    : TableSourceTable[T] = tableEnv match {
+
+    case _: BatchTableEnvironment if externalTable.isBatchTable =>
+      val source = TableFactoryUtil.findAndCreateTableSource(tableEnv, externalTable)
+      new BatchTableSourceTable[T](source.asInstanceOf[BatchTableSource[T]], statistics)
+
+    case _: StreamTableEnvironment if externalTable.isStreamTable =>
+      val source = TableFactoryUtil.findAndCreateTableSource(tableEnv, externalTable)
+      new StreamTableSourceTable[T](source.asInstanceOf[StreamTableSource[T]], statistics)
+
+    case _ =>
+      throw new ValidationException(
+        "External catalog table does not support the current environment for a table source.")
+  }
+
+  private def createTableSink[T](
+      tableEnv: TableEnvironment,
+      externalTable: ExternalCatalogTable,
+      statistics: FlinkStatistic)
+    : TableSinkTable[T] = tableEnv match {
+
+    case _: BatchTableEnvironment if externalTable.isBatchTable =>
+      val sink = TableFactoryUtil.findAndCreateTableSink(tableEnv, externalTable)
+      new TableSinkTable[T](sink.asInstanceOf[BatchTableSink[T]], statistics)
+
+    case _: StreamTableEnvironment if externalTable.isStreamTable =>
+      val sink = TableFactoryUtil.findAndCreateTableSink(tableEnv, externalTable)
+      new TableSinkTable[T](sink.asInstanceOf[StreamTableSink[T]], statistics)
+
+    case _ =>
+      throw new ValidationException(
+        "External catalog table does not support the current environment for a table sink.")
+  }
+}
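
As a minimal usage sketch (not part of the diff), assuming a TableEnvironment and an ExternalCatalogTable are already in scope, the conversion above is invoked as:

    // Sketch only: tableEnv and externalCatalogTable are assumed to exist in scope.
    val table = ExternalTableUtil.fromExternalCatalogTable(tableEnv, externalCatalogTable)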

http://git-wip-us.apache.org/repos/asf/flink/blob/7bb07e4e/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/BatchTableDescriptor.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apach

[1/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 4862101dd -> 7bb07e4e7


http://git-wip-us.apache.org/repos/asf/flink/blob/7bb07e4e/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
--
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
index 7a98b0b..3f6426d 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala
@@ -18,6 +18,7 @@
 
 package org.apache.flink.table.descriptors
 
+import org.apache.flink.table.descriptors.StreamTableDescriptorValidator.{UPDATE_MODE, UPDATE_MODE_VALUE_APPEND, UPDATE_MODE_VALUE_RETRACT, UPDATE_MODE_VALUE_UPSERT}
 import org.apache.flink.util.Preconditions
 import org.junit.Assert.assertEquals
 import org.junit.Test
@@ -84,18 +85,44 @@ abstract class DescriptorTestBase {
   }
 }
 
-class TestTableSourceDescriptor(connector: ConnectorDescriptor)
-  extends TableSourceDescriptor {
+class TestTableDescriptor(connector: ConnectorDescriptor)
+  extends TableDescriptor
+  with SchematicDescriptor[TestTableDescriptor]
+  with StreamableDescriptor[TestTableDescriptor] {
 
-  this.connectorDescriptor = Some(connector)
+  private var formatDescriptor: Option[FormatDescriptor] = None
+  private var schemaDescriptor: Option[Schema] = None
+  private var updateMode: Option[String] = None
 
-  def addFormat(format: FormatDescriptor): TestTableSourceDescriptor = {
+  override private[flink] def addProperties(properties: DescriptorProperties): Unit = {
+    connector.addProperties(properties)
+    formatDescriptor.foreach(_.addProperties(properties))
+    schemaDescriptor.foreach(_.addProperties(properties))
+    updateMode.foreach(mode => properties.putString(UPDATE_MODE, mode))
+  }
+
+  override def withFormat(format: FormatDescriptor): TestTableDescriptor = {
 this.formatDescriptor = Some(format)
 this
   }
 
-  def addSchema(schema: Schema): TestTableSourceDescriptor = {
+  override def withSchema(schema: Schema): TestTableDescriptor = {
 this.schemaDescriptor = Some(schema)
 this
   }
+
+  override def inAppendMode(): TestTableDescriptor = {
+    updateMode = Some(UPDATE_MODE_VALUE_APPEND)
+    this
+  }
+
+  override def inRetractMode(): TestTableDescriptor = {
+    updateMode = Some(UPDATE_MODE_VALUE_RETRACT)
+    this
+  }
+
+  override def inUpsertMode(): TestTableDescriptor = {
+    updateMode = Some(UPDATE_MODE_VALUE_UPSERT)
+    this
+  }
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/7bb07e4e/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
--
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
new file mode 100644
index 000..ccac317
--- /dev/null
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/TableDescriptorTest.scala
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.descriptors
+
+import org.apache.flink.table.api.Types
+import org.apache.flink.table.utils.TableTestBase
+import org.junit.Assert.assertEquals
+import org.junit.Test
+
+import scala.collection.JavaConverters._
+
+/**
+  * Tests for [[TableDescriptor]].
+  */
+class TableDescriptorTest extends TableTestBase {
+
+  @Test
+  def testStreamTableSourceDescriptor(): Unit = {
+testTableSourceDescriptor(true)
+  }
+
+  @Test
+  def testBatchTableSourceDescriptor(): Unit = {
+testTableSourceDescriptor(false)
+  }
+
+  private def testTableSourceDescriptor(isStreaming: Boolean): Unit = {
+
+val schema = Schema()
+  .field("myfield", Types.STRING)
+  .field("myfield2", T

[3/3] flink git commit: [FLINK-9852] [table] Expose descriptor-based sink creation and introduce update mode

2018-07-20 Thread twalthr
[FLINK-9852] [table] Expose descriptor-based sink creation and introduce update 
mode

This commit exposes the new unified sink creation through the table
environments and the external catalog table. It introduces a new update-mode
property in order to distinguish between append, retract, and upsert table
sources and sinks. It also refactors the top-level API classes one last time,
adds more documentation, and completes the unified table sources/sinks story
from an API point of view.

Brief change log:
- Introduction of TableEnvironment.connect() and corresponding API builder 
classes
- Introduction of property update-mode: and update of existing connectors
- External catalog support with proper source/sink discovery and API

This closes #6343.
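
As a minimal sketch (not part of the commit itself), the descriptor-based registration introduced here looks roughly like this, assuming the FileSystem, Csv, and Schema descriptors from flink-table; the path, field names, and table name are illustrative only:

    import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
    import org.apache.flink.table.api.{TableEnvironment, Types}
    import org.apache.flink.table.descriptors.{Csv, FileSystem, Schema}

    object ConnectApiSketch {
      def main(args: Array[String]): Unit = {
        val env = StreamExecutionEnvironment.getExecutionEnvironment
        val tableEnv = TableEnvironment.getTableEnvironment(env)

        // connect() builds a table descriptor; the update mode distinguishes
        // between append, retract, and upsert sources and sinks.
        tableEnv
          .connect(new FileSystem().path("/tmp/example-input.csv")) // assumed path
          .withFormat(
            new Csv()
              .field("user", Types.STRING)
              .field("cnt", Types.LONG))
          .withSchema(
            new Schema()
              .field("user", Types.STRING)
              .field("cnt", Types.LONG))
          .inAppendMode()
          .registerTableSource("ExampleSource") // assumed table name
      }
    }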


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/7bb07e4e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/7bb07e4e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/7bb07e4e

Branch: refs/heads/release-1.6
Commit: 7bb07e4e74ed7167cd98599af1a851afa1d77252
Parents: 4862101
Author: Timo Walther 
Authored: Fri Jul 20 08:47:53 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 09:33:48 2018 +0200

--
 .../kafka/Kafka010AvroTableSource.java  |   6 +-
 .../kafka/Kafka010JsonTableSource.java  |   6 +-
 .../kafka/Kafka011AvroTableSource.java  |   6 +-
 .../kafka/Kafka011JsonTableSource.java  |   6 +-
 .../kafka/Kafka08AvroTableSource.java   |   6 +-
 .../kafka/Kafka08JsonTableSource.java   |   6 +-
 .../kafka/Kafka09AvroTableSource.java   |   6 +-
 .../kafka/Kafka09JsonTableSource.java   |   6 +-
 .../connectors/kafka/KafkaAvroTableSource.java  |   6 +-
 .../connectors/kafka/KafkaJsonTableSource.java  |   4 +-
 .../connectors/kafka/KafkaTableSource.java  |   4 +-
 .../kafka/KafkaTableSourceFactory.java  |   3 +
 .../KafkaJsonTableSourceFactoryTestBase.java|  11 +-
 .../kafka/KafkaTableSourceFactoryTestBase.java  |  11 +-
 .../apache/flink/table/client/config/Sink.java  |   4 +-
 .../flink/table/client/config/Source.java   |   4 +-
 .../flink/table/client/config/SourceSink.java   |   2 +-
 .../client/gateway/local/EnvironmentTest.java   |  14 +-
 .../gateway/local/ExecutionContextTest.java |   6 +-
 .../gateway/local/LocalExecutorITCase.java  |  12 +-
 .../gateway/utils/TestTableSinkFactory.java |   3 +
 .../gateway/utils/TestTableSourceFactory.java   |   3 +
 .../resources/test-sql-client-defaults.yaml |   3 +
 .../test/resources/test-sql-client-factory.yaml |   1 +
 ...rg.apache.flink.table.factories.TableFactory |   6 +-
 .../flink/table/api/BatchTableEnvironment.scala |  36 +-
 .../table/api/StreamTableEnvironment.scala  |  26 +-
 .../flink/table/api/TableEnvironment.scala  |  32 +-
 .../table/catalog/ExternalCatalogSchema.scala   |   2 +-
 .../table/catalog/ExternalCatalogTable.scala| 335 +--
 .../table/catalog/ExternalTableSourceUtil.scala |  70 
 .../flink/table/catalog/ExternalTableUtil.scala | 102 ++
 .../descriptors/BatchTableDescriptor.scala  |  31 ++
 .../BatchTableSourceDescriptor.scala|  87 -
 .../descriptors/ConnectTableDescriptor.scala| 108 ++
 .../flink/table/descriptors/Descriptor.scala|   9 +-
 .../descriptors/DescriptorProperties.scala  |  10 +
 .../descriptors/RegistrableDescriptor.scala |  49 +++
 .../table/descriptors/SchematicDescriptor.scala |  35 ++
 .../descriptors/StreamTableDescriptor.scala | 101 ++
 .../StreamTableDescriptorValidator.scala|  48 +++
 .../StreamTableSourceDescriptor.scala   |  90 -
 .../descriptors/StreamableDescriptor.scala  |  67 
 .../table/descriptors/TableDescriptor.scala |  20 +-
 .../descriptors/TableDescriptorValidator.scala  |  29 --
 .../table/descriptors/TableSinkDescriptor.scala |  32 --
 .../descriptors/TableSourceDescriptor.scala |  57 
 .../flink/table/factories/TableFactory.scala|   6 +
 .../table/factories/TableFactoryService.scala   |   8 +-
 .../table/factories/TableFactoryUtil.scala  |  82 +
 .../table/factories/TableFormatFactory.scala|   2 +
 .../table/sinks/CsvAppendTableSinkFactory.scala |  45 +++
 .../table/sinks/CsvBatchTableSinkFactory.scala  |  38 +++
 .../flink/table/sinks/CsvTableSinkFactory.scala | 112 ---
 .../table/sinks/CsvTableSinkFactoryBase.scala   |  96 ++
 .../sources/CsvAppendTableSourceFactory.scala   |  45 +++
 .../sources/CsvBatchTableSourceFactory.scala|  38 +++
 .../table/sources/CsvTableSourceFactory.scala   | 139 
 .../sources/CsvTableSourceFactoryBase.scala | 123 +++
 .../flink/table/api/ExternalCatalogTest.scala   |  20 +-
 .../catalog/ExternalCatalogSchemaTest.scala |   2 +-
 .../catalog/InMemoryExternalCatalogTest.scala   |   8 +-
 .../tabl

flink git commit: [FLINK-9886] [sql-client] Build SQL jars with every build

2018-07-20 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 6fcc1e9a8 -> 46334e2f3


[FLINK-9886] [sql-client] Build SQL jars with every build

This closes #6366.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/46334e2f
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/46334e2f
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/46334e2f

Branch: refs/heads/master
Commit: 46334e2f39253cece08357269a0f0392c8bd5a9c
Parents: 6fcc1e9
Author: Timo Walther 
Authored: Wed Jul 18 13:30:28 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 09:53:23 2018 +0200

--
 flink-connectors/flink-connector-kafka-0.10/pom.xml | 6 +++---
 flink-connectors/flink-connector-kafka-0.11/pom.xml | 6 +++---
 flink-connectors/flink-connector-kafka-0.9/pom.xml  | 6 +++---
 flink-formats/flink-avro/pom.xml| 9 +++--
 flink-formats/flink-json/pom.xml| 6 +++---
 5 files changed, 19 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/46334e2f/flink-connectors/flink-connector-kafka-0.10/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.10/pom.xml 
b/flink-connectors/flink-connector-kafka-0.10/pom.xml
index f054843..135dc59 100644
--- a/flink-connectors/flink-connector-kafka-0.10/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.10/pom.xml
@@ -203,12 +203,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/46334e2f/flink-connectors/flink-connector-kafka-0.11/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.11/pom.xml 
b/flink-connectors/flink-connector-kafka-0.11/pom.xml
index 6de0496..bf04aeb 100644
--- a/flink-connectors/flink-connector-kafka-0.11/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.11/pom.xml
@@ -212,12 +212,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/46334e2f/flink-connectors/flink-connector-kafka-0.9/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.9/pom.xml 
b/flink-connectors/flink-connector-kafka-0.9/pom.xml
index f8d4a89..3e0aa46 100644
--- a/flink-connectors/flink-connector-kafka-0.9/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.9/pom.xml
@@ -191,12 +191,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/46334e2f/flink-formats/flink-avro/pom.xml
--
diff --git a/flink-formats/flink-avro/pom.xml b/flink-formats/flink-avro/pom.xml
index 9461719..9d844c9 100644
--- a/flink-formats/flink-avro/pom.xml
+++ b/flink-formats/flink-avro/pom.xml
@@ -109,9 +109,14 @@ under the License.

 

+   

-   
-   release
+   sql-jars
+   
+   
+   !skipSqlJars
+   
+   




http://git-wip-us.apache.org/repos/asf/flink/blob/46334e2f/flink-formats/flink-json/pom.xml
--
diff --git a/flink-formats/flink-json/pom.xml b/flink-formats/flink-json/pom.xml
index fbe0136..0cb3f8d 100644
--- a/flink-formats/flink-json/pom.xml
+++ b/flink-formats/flink-json/pom.xml
@@ -86,12 +86,12 @@ under the License.

 
   

flink git commit: [FLINK-9886] [sql-client] Build SQL jars with every build

2018-07-20 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 7bb07e4e7 -> 36ae5cd3c


[FLINK-9886] [sql-client] Build SQL jars with every build

This closes #6366.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/36ae5cd3
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/36ae5cd3
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/36ae5cd3

Branch: refs/heads/release-1.6
Commit: 36ae5cd3ceb1a52b73ac8bfb845a15d361c9f390
Parents: 7bb07e4
Author: Timo Walther 
Authored: Wed Jul 18 13:30:28 2018 +0200
Committer: Timo Walther 
Committed: Fri Jul 20 09:55:33 2018 +0200

--
 flink-connectors/flink-connector-kafka-0.10/pom.xml | 6 +++---
 flink-connectors/flink-connector-kafka-0.11/pom.xml | 6 +++---
 flink-connectors/flink-connector-kafka-0.9/pom.xml  | 6 +++---
 flink-formats/flink-avro/pom.xml| 9 +++--
 flink-formats/flink-json/pom.xml| 6 +++---
 5 files changed, 19 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/36ae5cd3/flink-connectors/flink-connector-kafka-0.10/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.10/pom.xml 
b/flink-connectors/flink-connector-kafka-0.10/pom.xml
index 2fb7a32..9b2353d 100644
--- a/flink-connectors/flink-connector-kafka-0.10/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.10/pom.xml
@@ -203,12 +203,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/36ae5cd3/flink-connectors/flink-connector-kafka-0.11/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.11/pom.xml 
b/flink-connectors/flink-connector-kafka-0.11/pom.xml
index aa60004..4ff1d96 100644
--- a/flink-connectors/flink-connector-kafka-0.11/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.11/pom.xml
@@ -212,12 +212,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/36ae5cd3/flink-connectors/flink-connector-kafka-0.9/pom.xml
--
diff --git a/flink-connectors/flink-connector-kafka-0.9/pom.xml 
b/flink-connectors/flink-connector-kafka-0.9/pom.xml
index 20d2991..74ada66 100644
--- a/flink-connectors/flink-connector-kafka-0.9/pom.xml
+++ b/flink-connectors/flink-connector-kafka-0.9/pom.xml
@@ -191,12 +191,12 @@ under the License.

 

+   

-   
-   release
+   sql-jars


-   release
+   !skipSqlJars




http://git-wip-us.apache.org/repos/asf/flink/blob/36ae5cd3/flink-formats/flink-avro/pom.xml
--
diff --git a/flink-formats/flink-avro/pom.xml b/flink-formats/flink-avro/pom.xml
index dbf7fd0..f313978 100644
--- a/flink-formats/flink-avro/pom.xml
+++ b/flink-formats/flink-avro/pom.xml
@@ -109,9 +109,14 @@ under the License.

 

+   

-   
-   release
+   sql-jars
+   
+   
+   !skipSqlJars
+   
+   




http://git-wip-us.apache.org/repos/asf/flink/blob/36ae5cd3/flink-formats/flink-json/pom.xml
--
diff --git a/flink-formats/flink-json/pom.xml b/flink-formats/flink-json/pom.xml
index 4e31766..9884ff6 100644
--- a/flink-formats/flink-json/pom.xml
+++ b/flink-formats/flink-json/pom.xml
@@ -86,12 +86,12 @@ under the License.

flink git commit: [hotfix] [table] Deprecate SchemaValidator#deriveTableSinkSchema

2018-07-22 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 690ab2c31 -> 48791c1ea


[hotfix] [table] Deprecate SchemaValidator#deriveTableSinkSchema

The method combines two separate concepts of table schema and field
mapping. This should be split into two methods once we have support
for the corresponding interfaces (see FLINK-9870).
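
As a rough sketch of the pattern this change nudges factories toward (not part of the commit; the function and variable names are assumptions), a factory can read the source schema directly from the descriptor properties, while the sink side keeps using the deprecated helper until FLINK-9870 separates schema and field mapping:

    import org.apache.flink.table.descriptors.{DescriptorProperties, SchemaValidator}
    import org.apache.flink.table.descriptors.SchemaValidator.SCHEMA

    // Sketch: `properties` is the raw string map handed to a table factory.
    def readSchemas(properties: java.util.Map[String, String]) = {
      val params = new DescriptorProperties(true)
      params.putProperties(properties)
      // a table source can take the schema as-is from the properties ...
      val sourceSchema = params.getTableSchema(SCHEMA)
      // ... while a sink still goes through the now-deprecated helper, which also
      // resolves the field mapping
      val sinkSchema = SchemaValidator.deriveTableSinkSchema(params)
      (sourceSchema, sinkSchema)
    }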


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/48791c1e
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/48791c1e
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/48791c1e

Branch: refs/heads/master
Commit: 48791c1ea538727a83ac39613a07c0e6214a8b1d
Parents: 690ab2c
Author: Timo Walther 
Authored: Fri Jul 20 12:59:41 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 05:46:49 2018 +0200

--
 .../client/gateway/utils/TestTableSourceFactory.java   |  2 +-
 .../flink/table/descriptors/SchemaValidator.scala  | 13 +
 .../flink/table/sinks/CsvTableSinkFactoryBase.scala|  2 +-
 .../flink/table/utils/InMemoryTableFactory.scala   |  2 +-
 4 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/48791c1e/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
 
b/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
index 81f00e5..b0b8848 100644
--- 
a/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
+++ 
b/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
@@ -83,7 +83,7 @@ public class TestTableSourceFactory implements 
StreamTableSourceFactory {
final Optional proctime = 
SchemaValidator.deriveProctimeAttribute(params);
final List rowtime = 
SchemaValidator.deriveRowtimeAttributes(params);
return new TestTableSource(
-   SchemaValidator.deriveTableSourceSchema(params),
+   params.getTableSchema(SCHEMA()),
properties.get(CONNECTOR_TEST_PROPERTY),
proctime.orElse(null),
rowtime);

http://git-wip-us.apache.org/repos/asf/flink/blob/48791c1e/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
index ec83b3c..af2baba 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
@@ -175,17 +175,14 @@ object SchemaValidator {
   }
 
   /**
-    * Derives the table schema for a table source. A table source can directly use "name" and
-    * "type" and needs no special handling for time attributes or aliasing.
-    */
-  def deriveTableSourceSchema(properties: DescriptorProperties): TableSchema = {
-    properties.getTableSchema(SCHEMA)
-  }
-
-  /**
     * Derives the table schema for a table sink. A sink ignores a proctime attribute and
     * needs to track the origin of a rowtime field.
+    *
+    * @deprecated This method combines two separate concepts of table schema and field mapping.
+    *             This should be split into two methods once we have support for
+    *             the corresponding interfaces (see FLINK-9870).
     */
+  @deprecated
   def deriveTableSinkSchema(properties: DescriptorProperties): TableSchema = {
     val builder = TableSchema.builder()
 

http://git-wip-us.apache.org/repos/asf/flink/blob/48791c1e/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
index 6ceba4c..849d16c 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
@@ -77,7 +77,7 @@ abst

flink git commit: [hotfix] [table] Deprecate SchemaValidator#deriveTableSinkSchema

2018-07-22 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 8b1cc1674 -> b6fb4f636


[hotfix] [table] Deprecate SchemaValidator#deriveTableSinkSchema

The method combines two separate concepts of table schema and field
mapping. This should be split into two methods once we have support
for the corresponding interfaces (see FLINK-9870).


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b6fb4f63
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b6fb4f63
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b6fb4f63

Branch: refs/heads/release-1.6
Commit: b6fb4f636716be12445ff9facb76382cac1e1e56
Parents: 8b1cc16
Author: Timo Walther 
Authored: Fri Jul 20 12:59:41 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 06:03:30 2018 +0200

--
 .../client/gateway/utils/TestTableSourceFactory.java   |  2 +-
 .../flink/table/descriptors/SchemaValidator.scala  | 13 +
 .../flink/table/sinks/CsvTableSinkFactoryBase.scala|  2 +-
 .../flink/table/utils/InMemoryTableFactory.scala   |  2 +-
 4 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/b6fb4f63/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
 
b/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
index 81f00e5..b0b8848 100644
--- 
a/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
+++ 
b/flink-libraries/flink-sql-client/src/test/java/org/apache/flink/table/client/gateway/utils/TestTableSourceFactory.java
@@ -83,7 +83,7 @@ public class TestTableSourceFactory implements 
StreamTableSourceFactory {
final Optional proctime = 
SchemaValidator.deriveProctimeAttribute(params);
final List rowtime = 
SchemaValidator.deriveRowtimeAttributes(params);
return new TestTableSource(
-   SchemaValidator.deriveTableSourceSchema(params),
+   params.getTableSchema(SCHEMA()),
properties.get(CONNECTOR_TEST_PROPERTY),
proctime.orElse(null),
rowtime);

http://git-wip-us.apache.org/repos/asf/flink/blob/b6fb4f63/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
index ec83b3c..af2baba 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/SchemaValidator.scala
@@ -175,17 +175,14 @@ object SchemaValidator {
   }
 
   /**
-    * Derives the table schema for a table source. A table source can directly use "name" and
-    * "type" and needs no special handling for time attributes or aliasing.
-    */
-  def deriveTableSourceSchema(properties: DescriptorProperties): TableSchema = {
-    properties.getTableSchema(SCHEMA)
-  }
-
-  /**
     * Derives the table schema for a table sink. A sink ignores a proctime attribute and
     * needs to track the origin of a rowtime field.
+    *
+    * @deprecated This method combines two separate concepts of table schema and field mapping.
+    *             This should be split into two methods once we have support for
+    *             the corresponding interfaces (see FLINK-9870).
     */
+  @deprecated
   def deriveTableSinkSchema(properties: DescriptorProperties): TableSchema = {
     val builder = TableSchema.builder()
 

http://git-wip-us.apache.org/repos/asf/flink/blob/b6fb4f63/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
index 6ceba4c..849d16c 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sinks/CsvTableSinkFactoryBase.scala
@@ -77,7 +77

flink git commit: [hotfix] [sql-client] Wrap exceptions thrown during environment instance creation

2018-07-23 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master d850fdec9 -> 73088749e


[hotfix] [sql-client] Wrap exceptions thrown during environment instance 
creation


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/73088749
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/73088749
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/73088749

Branch: refs/heads/master
Commit: 73088749e6377ad40bfed3a9da1b306bfbc4c2f4
Parents: d850fde
Author: Timo Walther 
Authored: Mon Jul 23 13:48:45 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 13:48:45 2018 +0200

--
 .../flink/table/client/gateway/local/ExecutionContext.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/73088749/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
index 9152908..4283953 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
@@ -166,7 +166,12 @@ public class ExecutionContext {
}
 
public EnvironmentInstance createEnvironmentInstance() {
-   return new EnvironmentInstance();
+   try {
+   return new EnvironmentInstance();
+   } catch (Throwable t) {
+   // catch everything such that a wrong environment does 
not affect invocations
+   throw new SqlExecutionException("Could not create 
environment instance.", t);
+   }
}
 
public Map> getTableSources() {



flink git commit: [hotfix] [sql-client] Wrap exceptions thrown during environment instance creation

2018-07-23 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 0da30b713 -> 5d8431474


[hotfix] [sql-client] Wrap exceptions thrown during environment instance 
creation


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/5d843147
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/5d843147
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/5d843147

Branch: refs/heads/release-1.6
Commit: 5d8431474b6e1c082fc06c83f739a85211cb1482
Parents: 0da30b7
Author: Timo Walther 
Authored: Mon Jul 23 13:48:45 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 13:55:01 2018 +0200

--
 .../flink/table/client/gateway/local/ExecutionContext.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/5d843147/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
index 9152908..4283953 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
@@ -166,7 +166,12 @@ public class ExecutionContext {
}
 
public EnvironmentInstance createEnvironmentInstance() {
-   return new EnvironmentInstance();
+   try {
+   return new EnvironmentInstance();
+   } catch (Throwable t) {
+   // catch everything such that a wrong environment does 
not affect invocations
+   throw new SqlExecutionException("Could not create 
environment instance.", t);
+   }
}
 
public Map> getTableSources() {



[2/2] flink git commit: [FLINK-9846] [table] Add a Kafka table sink factory

2018-07-23 Thread twalthr
[FLINK-9846] [table] Add a Kafka table sink factory

Adds a Kafka table sink factory with format discovery. Currently, this enables
the SQL Client to write Avro and JSON data to Kafka. The functionality is
limited due to FLINK-9870. Therefore, it is currently not possible
to use time attributes in the output.

Changes:
- Decouple Kafka sink from formats and deprecate old classes
- Add a Kafka table sink factory
- Existing tests for the KafkaTableSourceFactory have been
  generalized to support sinks as well.

This closes #6387.
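
As a minimal sketch (not part of the commit) of what this enables through the descriptor API, assuming the Kafka and Json descriptors documented for this release; the version, topic, broker address, and field names are made-up examples, and no time attributes appear in the output because of the FLINK-9870 limitation noted above:

    import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
    import org.apache.flink.table.api.{TableEnvironment, Types}
    import org.apache.flink.table.descriptors.{Json, Kafka, Schema}

    object KafkaSinkSketch {
      def main(args: Array[String]): Unit = {
        val env = StreamExecutionEnvironment.getExecutionEnvironment
        val tableEnv = TableEnvironment.getTableEnvironment(env)

        // The sink factory is discovered from the connector and format properties,
        // mirroring how the SQL Client resolves its YAML configuration.
        tableEnv
          .connect(
            new Kafka()
              .version("0.11")                                  // assumed Kafka version
              .topic("output-topic")                            // assumed topic
              .property("bootstrap.servers", "localhost:9092")) // assumed address
          .withFormat(new Json().deriveSchema())
          .withSchema(
            new Schema()
              .field("user", Types.STRING)
              .field("cnt", Types.LONG))
          .inAppendMode()
          .registerTableSink("KafkaOutput") // assumed table name
      }
    }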


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/57b3cde8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/57b3cde8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/57b3cde8

Branch: refs/heads/master
Commit: 57b3cde863922094be4f395063317e42349aedb3
Parents: 9e348d3
Author: Timo Walther 
Authored: Mon Jul 23 08:12:00 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 18:17:28 2018 +0200

--
 .../connectors/kafka/Kafka010JsonTableSink.java |  19 +-
 .../connectors/kafka/Kafka010TableSink.java |  61 
 .../kafka/Kafka010TableSourceFactory.java   |  72 
 .../kafka/Kafka010TableSourceSinkFactory.java   |  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka010JsonTableSinkTest.java|   4 +
 .../kafka/Kafka010TableSourceFactoryTest.java   |  74 -
 .../Kafka010TableSourceSinkFactoryTest.java |  99 ++
 .../connectors/kafka/Kafka011TableSink.java |  64 
 .../connectors/kafka/Kafka011TableSource.java   |   3 +-
 .../kafka/Kafka011TableSourceFactory.java   |  72 
 .../kafka/Kafka011TableSourceSinkFactory.java   |  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka011TableSourceFactoryTest.java   |  74 -
 .../Kafka011TableSourceSinkFactoryTest.java |  99 ++
 .../connectors/kafka/Kafka08JsonTableSink.java  |  19 +-
 .../connectors/kafka/Kafka08TableSink.java  |  61 
 .../connectors/kafka/Kafka08TableSource.java|   3 +-
 .../kafka/Kafka08TableSourceFactory.java|  72 
 .../kafka/Kafka08TableSourceSinkFactory.java|  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka08JsonTableSinkTest.java |   4 +
 .../kafka/Kafka08TableSourceFactoryTest.java|  74 -
 .../Kafka08TableSourceSinkFactoryTest.java  |  99 ++
 .../connectors/kafka/Kafka09JsonTableSink.java  |  19 +-
 .../connectors/kafka/Kafka09TableSink.java  |  61 
 .../connectors/kafka/Kafka09TableSource.java|   3 +-
 .../kafka/Kafka09TableSourceFactory.java|  72 
 .../kafka/Kafka09TableSourceSinkFactory.java|  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka09JsonTableSinkTest.java |   4 +
 .../kafka/Kafka09TableSourceFactoryTest.java|  74 -
 .../Kafka09TableSourceSinkFactoryTest.java  |  99 ++
 .../connectors/kafka/KafkaJsonTableSink.java|   5 +
 .../connectors/kafka/KafkaTableSink.java| 112 ++-
 .../kafka/KafkaTableSourceFactory.java  | 251 --
 .../kafka/KafkaTableSourceSinkFactoryBase.java  | 330 +++
 .../partitioner/FlinkFixedPartitioner.java  |  10 +
 .../kafka/KafkaTableSinkTestBase.java   |   7 +-
 .../kafka/KafkaTableSourceFactoryTestBase.java  | 196 ---
 .../KafkaTableSourceSinkFactoryTestBase.java| 299 +
 .../utils/TestSerializationSchema.scala |  16 +-
 .../utils/TestTableFormatFactory.scala  |  18 +-
 43 files changed, 1852 insertions(+), 1065 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/57b3cde8/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index ef33cd5..2ad3142 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -18,18 +18,23 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import 
org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import 
org.apache.flink.streaming.connectors.kafk

[1/2] flink git commit: [FLINK-9846] [table] Add a Kafka table sink factory

2018-07-23 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 9e348d32c -> 57b3cde86


http://git-wip-us.apache.org/repos/asf/flink/blob/57b3cde8/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
deleted file mode 100644
index b976e14..000
--- 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.kafka;
-
-import org.apache.flink.api.common.serialization.DeserializationSchema;
-import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
-import 
org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.descriptors.KafkaValidator;
-import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
-import org.apache.flink.types.Row;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-/**
- * Test for {@link Kafka09TableSource} created by {@link 
Kafka09TableSourceFactory}.
- */
-public class Kafka09TableSourceFactoryTest extends 
KafkaTableSourceFactoryTestBase {
-
-   @Override
-   protected String getKafkaVersion() {
-   return KafkaValidator.CONNECTOR_VERSION_VALUE_09;
-   }
-
-   @Override
-   @SuppressWarnings("unchecked")
-   protected Class> 
getExpectedFlinkKafkaConsumer() {
-   return (Class) FlinkKafkaConsumer09.class;
-   }
-
-   @Override
-   protected KafkaTableSource getExpectedKafkaTableSource(
-   TableSchema schema,
-   Optional proctimeAttribute,
-   List 
rowtimeAttributeDescriptors,
-   Map fieldMapping,
-   String topic,
-   Properties properties,
-   DeserializationSchema deserializationSchema,
-   StartupMode startupMode,
-   Map specificStartupOffsets) {
-
-   return new Kafka09TableSource(
-   schema,
-   proctimeAttribute,
-   rowtimeAttributeDescriptors,
-   Optional.of(fieldMapping),
-   topic,
-   properties,
-   deserializationSchema,
-   startupMode,
-   specificStartupOffsets
-   );
-   }
-}

http://git-wip-us.apache.org/repos/asf/flink/blob/57b3cde8/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
new file mode 100644
index 000..a6c8bd4
--- /dev/null
+++ 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a c

[2/2] flink git commit: [FLINK-9846] [table] Add a Kafka table sink factory

2018-07-23 Thread twalthr
[FLINK-9846] [table] Add a Kafka table sink factory

Adds a Kafka table sink factory with format discovery. Currently, this enables
the SQL Client to write Avro and JSON data to Kafka. The functionality is
limited due to FLINK-9870. Therefore, it is currently not possible
to use time attributes in the output.

Changes:
- Decouple Kafka sink from formats and deprecate old classes
- Add a Kafka table sink factory
- Existing tests for the KafkaTableSourceFactory have been
  generalized to support sinks as well.

This closes #6387.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/aa25b4b3
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/aa25b4b3
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/aa25b4b3

Branch: refs/heads/release-1.6
Commit: aa25b4b324b4c025fe9e58e081677faf0ab50a7d
Parents: 702f773
Author: Timo Walther 
Authored: Mon Jul 23 08:12:00 2018 +0200
Committer: Timo Walther 
Committed: Mon Jul 23 18:19:53 2018 +0200

--
 .../connectors/kafka/Kafka010JsonTableSink.java |  19 +-
 .../connectors/kafka/Kafka010TableSink.java |  61 
 .../kafka/Kafka010TableSourceFactory.java   |  72 
 .../kafka/Kafka010TableSourceSinkFactory.java   |  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka010JsonTableSinkTest.java|   4 +
 .../kafka/Kafka010TableSourceFactoryTest.java   |  74 -
 .../Kafka010TableSourceSinkFactoryTest.java |  99 ++
 .../connectors/kafka/Kafka011TableSink.java |  64 
 .../connectors/kafka/Kafka011TableSource.java   |   3 +-
 .../kafka/Kafka011TableSourceFactory.java   |  72 
 .../kafka/Kafka011TableSourceSinkFactory.java   |  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka011TableSourceFactoryTest.java   |  74 -
 .../Kafka011TableSourceSinkFactoryTest.java |  99 ++
 .../connectors/kafka/Kafka08JsonTableSink.java  |  19 +-
 .../connectors/kafka/Kafka08TableSink.java  |  61 
 .../connectors/kafka/Kafka08TableSource.java|   3 +-
 .../kafka/Kafka08TableSourceFactory.java|  72 
 .../kafka/Kafka08TableSourceSinkFactory.java|  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka08JsonTableSinkTest.java |   4 +
 .../kafka/Kafka08TableSourceFactoryTest.java|  74 -
 .../Kafka08TableSourceSinkFactoryTest.java  |  99 ++
 .../connectors/kafka/Kafka09JsonTableSink.java  |  19 +-
 .../connectors/kafka/Kafka09TableSink.java  |  61 
 .../connectors/kafka/Kafka09TableSource.java|   3 +-
 .../kafka/Kafka09TableSourceFactory.java|  72 
 .../kafka/Kafka09TableSourceSinkFactory.java|  90 +
 ...rg.apache.flink.table.factories.TableFactory |   2 +-
 .../kafka/Kafka09JsonTableSinkTest.java |   4 +
 .../kafka/Kafka09TableSourceFactoryTest.java|  74 -
 .../Kafka09TableSourceSinkFactoryTest.java  |  99 ++
 .../connectors/kafka/KafkaJsonTableSink.java|   5 +
 .../connectors/kafka/KafkaTableSink.java| 112 ++-
 .../kafka/KafkaTableSourceFactory.java  | 251 --
 .../kafka/KafkaTableSourceSinkFactoryBase.java  | 330 +++
 .../partitioner/FlinkFixedPartitioner.java  |  10 +
 .../kafka/KafkaTableSinkTestBase.java   |   7 +-
 .../kafka/KafkaTableSourceFactoryTestBase.java  | 196 ---
 .../KafkaTableSourceSinkFactoryTestBase.java| 299 +
 .../utils/TestSerializationSchema.scala |  16 +-
 .../utils/TestTableFormatFactory.scala  |  18 +-
 43 files changed, 1852 insertions(+), 1065 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/aa25b4b3/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index ef33cd5..2ad3142 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -18,18 +18,23 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.serialization.SerializationSchema;
 import 
org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import 
org.apache.flink.streaming.connectors

[1/2] flink git commit: [FLINK-9846] [table] Add a Kafka table sink factory

2018-07-23 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 702f77355 -> aa25b4b32


http://git-wip-us.apache.org/repos/asf/flink/blob/aa25b4b3/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
deleted file mode 100644
index b976e14..000
--- 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceFactoryTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.kafka;
-
-import org.apache.flink.api.common.serialization.DeserializationSchema;
-import org.apache.flink.streaming.connectors.kafka.config.StartupMode;
-import 
org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.descriptors.KafkaValidator;
-import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
-import org.apache.flink.types.Row;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-
-/**
- * Test for {@link Kafka09TableSource} created by {@link 
Kafka09TableSourceFactory}.
- */
-public class Kafka09TableSourceFactoryTest extends 
KafkaTableSourceFactoryTestBase {
-
-   @Override
-   protected String getKafkaVersion() {
-   return KafkaValidator.CONNECTOR_VERSION_VALUE_09;
-   }
-
-   @Override
-   @SuppressWarnings("unchecked")
-   protected Class> 
getExpectedFlinkKafkaConsumer() {
-   return (Class) FlinkKafkaConsumer09.class;
-   }
-
-   @Override
-   protected KafkaTableSource getExpectedKafkaTableSource(
-   TableSchema schema,
-   Optional proctimeAttribute,
-   List 
rowtimeAttributeDescriptors,
-   Map fieldMapping,
-   String topic,
-   Properties properties,
-   DeserializationSchema deserializationSchema,
-   StartupMode startupMode,
-   Map specificStartupOffsets) {
-
-   return new Kafka09TableSource(
-   schema,
-   proctimeAttribute,
-   rowtimeAttributeDescriptors,
-   Optional.of(fieldMapping),
-   topic,
-   properties,
-   deserializationSchema,
-   startupMode,
-   specificStartupOffsets
-   );
-   }
-}

http://git-wip-us.apache.org/repos/asf/flink/blob/aa25b4b3/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
new file mode 100644
index 000..a6c8bd4
--- /dev/null
+++ 
b/flink-connectors/flink-connector-kafka-0.9/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka09TableSourceSinkFactoryTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtai

flink git commit: [FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not reachable

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master b9a916afe -> 605a2


[FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not 
reachable

Moves the job cancellation into the final phase of the refresh thread in order 
to
keep the CLI responsive.

This closes #6265.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/605a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/605a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/605a

Branch: refs/heads/master
Commit: 605a2b8ddbff7dc0bea9d5ecd55ce0031a9f
Parents: b9a916a
Author: Timo Walther 
Authored: Thu Jul 5 13:25:28 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 11:58:56 2018 +0200

--
 .../flink/table/client/cli/CliResultView.java  | 17 +
 .../flink/table/client/gateway/Executor.java   |  3 ++-
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/605a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
index 9f893bb..df42edd 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
@@ -217,15 +217,7 @@ public abstract class CliResultView> 
extends CliView
 
@Override
protected void cleanUp() {
-   // stop retrieval
stopRetrieval();
-
-   // cancel table program
-   try {
-   client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
-   } catch (SqlExecutionException e) {
-   // ignore further exceptions
-   }
}
 
// 

@@ -285,6 +277,15 @@ public abstract class CliResultView> 
extends CliView
display();
}
}
+
+   // cancel table program
+   try {
+   // the cancellation happens in the refresh 
thread in order to keep the main thread
+   // responsive at all times; esp. if the cluster 
is not available
+   
client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
+   } catch (SqlExecutionException e) {
+   // ignore further exceptions
+   }
}
}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/605a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
index 7f903a4..3a4dd81 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
@@ -83,7 +83,8 @@ public interface Executor {
List retrieveResultPage(String resultId, int page) throws 
SqlExecutionException;
 
/**
-* Cancels a table program and stops the result retrieval.
+* Cancels a table program and stops the result retrieval. Blocking 
until cancellation command has
+* been sent to cluster.
 */
void cancelQuery(SessionContext session, String resultId) throws 
SqlExecutionException;
 



flink git commit: [FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not reachable

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 004d1162c -> 02d8865ce


[FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not 
reachable

Moves the job cancellation into the final phase of the refresh thread in order 
to
keep the CLI responsive.

This closes #6265.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/02d8865c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/02d8865c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/02d8865c

Branch: refs/heads/release-1.6
Commit: 02d8865ce7880a8fc4d98d38640222dddb155956
Parents: 004d116
Author: Timo Walther 
Authored: Thu Jul 5 13:25:28 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 12:26:22 2018 +0200

--
 .../flink/table/client/cli/CliResultView.java  | 17 +
 .../flink/table/client/gateway/Executor.java   |  3 ++-
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/02d8865c/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
index 9f893bb..df42edd 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
@@ -217,15 +217,7 @@ public abstract class CliResultView> 
extends CliView
 
@Override
protected void cleanUp() {
-   // stop retrieval
stopRetrieval();
-
-   // cancel table program
-   try {
-   client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
-   } catch (SqlExecutionException e) {
-   // ignore further exceptions
-   }
}
 
// 

@@ -285,6 +277,15 @@ public abstract class CliResultView> 
extends CliView
display();
}
}
+
+   // cancel table program
+   try {
+   // the cancellation happens in the refresh 
thread in order to keep the main thread
+   // responsive at all times; esp. if the cluster 
is not available
+   
client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
+   } catch (SqlExecutionException e) {
+   // ignore further exceptions
+   }
}
}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/02d8865c/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
index 7f903a4..3a4dd81 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
@@ -83,7 +83,8 @@ public interface Executor {
List retrieveResultPage(String resultId, int page) throws 
SqlExecutionException;
 
/**
-* Cancels a table program and stops the result retrieval.
+* Cancels a table program and stops the result retrieval. Blocking 
until cancellation command has
+* been sent to cluster.
 */
void cancelQuery(SessionContext session, String resultId) throws 
SqlExecutionException;
 



flink git commit: [FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not reachable

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.5 52006567b -> 860be1dcb


[FLINK-9765] [sql-client] Improve CLI responsiveness when cluster is not 
reachable

Moves the job cancellation into the final phase of the refresh thread in order 
to
keep the CLI responsive.

This closes #6265.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/860be1dc
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/860be1dc
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/860be1dc

Branch: refs/heads/release-1.5
Commit: 860be1dcbc155360c53bb9378d5a49933fbb3809
Parents: 5200656
Author: Timo Walther 
Authored: Thu Jul 5 13:25:28 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 12:28:26 2018 +0200

--
 .../flink/table/client/cli/CliResultView.java  | 17 +
 .../flink/table/client/gateway/Executor.java   |  3 ++-
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/860be1dc/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
index 9f893bb..df42edd 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliResultView.java
@@ -217,15 +217,7 @@ public abstract class CliResultView> 
extends CliView
 
@Override
protected void cleanUp() {
-   // stop retrieval
stopRetrieval();
-
-   // cancel table program
-   try {
-   client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
-   } catch (SqlExecutionException e) {
-   // ignore further exceptions
-   }
}
 
// 

@@ -285,6 +277,15 @@ public abstract class CliResultView> 
extends CliView
display();
}
}
+
+   // cancel table program
+   try {
+   // the cancellation happens in the refresh 
thread in order to keep the main thread
+   // responsive at all times; esp. if the cluster 
is not available
+   
client.getExecutor().cancelQuery(client.getContext(), 
resultDescriptor.getResultId());
+   } catch (SqlExecutionException e) {
+   // ignore further exceptions
+   }
}
}
 }

http://git-wip-us.apache.org/repos/asf/flink/blob/860be1dc/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
--
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
index 74e6a6b..bcc5798 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/Executor.java
@@ -78,7 +78,8 @@ public interface Executor {
List retrieveResultPage(String resultId, int page) throws 
SqlExecutionException;
 
/**
-* Cancels a table program and stops the result retrieval.
+* Cancels a table program and stops the result retrieval. Blocks until the cancellation command has
+* been sent to the cluster.
 */
void cancelQuery(SessionContext session, String resultId) throws 
SqlExecutionException;
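
For illustration only, a minimal sketch of the pattern described above (not the actual CliResultView code; the class and method names around the refresh loop are placeholders): the potentially blocking cancelQuery() call is issued from the refresh thread's final phase, so the main thread stays responsive even when the cluster is unreachable.

import org.apache.flink.table.client.gateway.Executor;
import org.apache.flink.table.client.gateway.SessionContext;
import org.apache.flink.table.client.gateway.SqlExecutionException;

public class RefreshThreadCancellation {

	public static Thread startRefreshThread(Executor executor, SessionContext session, String resultId) {
		Thread refreshThread = new Thread(() -> {
			try {
				// ... periodically fetch and display result pages here ...
			} finally {
				// cancellation happens in the refresh thread; the call may block,
				// but the main/UI thread is never held up by an unreachable cluster
				try {
					executor.cancelQuery(session, resultId);
				} catch (SqlExecutionException e) {
					// ignore further exceptions; cancellation is best-effort
				}
			}
		});
		refreshThread.start();
		return refreshThread;
	}
}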
 



[1/2] flink git commit: [FLINK-9296] [table] Add support for non-windowed DISTINCT aggregates.

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 22b322044 -> 365d9c141


[FLINK-9296] [table] Add support for non-windowed DISTINCT aggregates.

This closes #6393.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/9f181a48
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/9f181a48
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/9f181a48

Branch: refs/heads/master
Commit: 9f181a48c6e192b099a23b8fb46e3a604c22cb53
Parents: 22b3220
Author: Fabian Hueske 
Authored: Mon Jul 23 16:02:58 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 14:41:27 2018 +0200

--
 docs/dev/table/sql.md   |  1 -
 .../DataStreamGroupAggregateRule.scala  |  8 +-
 .../api/stream/sql/DistinctAggregateTest.scala  | 22 +
 .../table/runtime/stream/sql/SqlITCase.scala| 26 
 4 files changed, 49 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/9f181a48/docs/dev/table/sql.md
--
diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index 57e0ba5..b968c43 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -2645,7 +2645,6 @@ The following functions are not supported yet:
 
 - Binary string operators and functions
 - System functions
-- Distinct aggregate functions like COUNT DISTINCT
 
 {% top %}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/9f181a48/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
index 0b8e411..e99a86e 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
@@ -43,19 +43,13 @@ class DataStreamGroupAggregateRule
   override def matches(call: RelOptRuleCall): Boolean = {
 val agg: FlinkLogicalAggregate = 
call.rel(0).asInstanceOf[FlinkLogicalAggregate]
 
-// check if we have distinct aggregates
-val distinctAggs = agg.getAggCallList.exists(_.isDistinct)
-if (distinctAggs) {
-  throw TableException("DISTINCT aggregates are currently not supported.")
-}
-
 // check if we have grouping sets
 val groupSets = agg.getGroupSets.size() != 1 || agg.getGroupSets.get(0) != 
agg.getGroupSet
 if (groupSets || agg.indicator) {
   throw TableException("GROUPING SETS are currently not supported.")
 }
 
-!distinctAggs && !groupSets && !agg.indicator
+!groupSets && !agg.indicator
   }
 
   override def convert(rel: RelNode): RelNode = {

http://git-wip-us.apache.org/repos/asf/flink/blob/9f181a48/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
--
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
index 1ce63c6..95aa3ea 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
@@ -72,6 +72,28 @@ class DistinctAggregateTest extends TableTestBase {
   }
 
   @Test
+  def testDistinctAggregate(): Unit = {
+val sqlQuery = "SELECT " +
+  "  c, SUM(DISTINCT a), SUM(a), COUNT(DISTINCT b) " +
+  "FROM MyTable " +
+  "GROUP BY c "
+
+val expected =
+  unaryNode(
+"DataStreamGroupAggregate",
+unaryNode(
+  "DataStreamCalc",
+  streamTableNode(0),
+  term("select", "c", "a", "b")
+),
+term("groupBy", "c"),
+term("select", "c",
+  "SUM(DISTINCT a) AS EXPR$1", "SUM(a) AS EXPR$2", "COUNT(DISTINCT b) 
AS EXPR$3")
+  )
+streamUtil.verifySql(sqlQuery, expected)
+  }
+
+  @Test
   def testDistinctAggregateOnTumbleWindow(): Unit = {
 val sqlQuery = "SELECT COUNT(DISTINCT a), " +
   "  SUM(a) " +

http://git-wip-us.apache.org/repos/asf/flink/blob/9f181a48/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SqlITCase.scala
---
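
For illustration only, a minimal sketch of what this change enables from a user's perspective (not part of the commit; the input stream and the table MyTable with columns a, b, c are assumed): non-windowed, unbounded GROUP BY queries may now use DISTINCT aggregates directly, mirroring the query used in the new plan test above.

import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;

public class DistinctAggregateExample {

	public static Table distinctAggregates(
			StreamExecutionEnvironment env,
			DataStream<Tuple3<Integer, Long, String>> input) {
		StreamTableEnvironment tEnv = TableEnvironment.getTableEnvironment(env);
		// register the stream as table MyTable with columns a, b, c
		tEnv.registerDataStream("MyTable", input, "a, b, c");
		// DISTINCT aggregates on a non-windowed (unbounded) grouping
		return tEnv.sqlQuery(
			"SELECT c, SUM(DISTINCT a), SUM(a), COUNT(DISTINCT b) FROM MyTable GROUP BY c");
	}
}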

[2/2] flink git commit: [FLINK-9296] [table] Add documentation for DISTINCT aggregates

2018-07-24 Thread twalthr
[FLINK-9296] [table] Add documentation for DISTINCT aggregates


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/365d9c14
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/365d9c14
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/365d9c14

Branch: refs/heads/master
Commit: 365d9c141cce1f792c3bfd8fa9a9bf3ebf0c1cde
Parents: 9f181a4
Author: Timo Walther 
Authored: Tue Jul 24 14:38:39 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 14:41:35 2018 +0200

--
 docs/dev/table/sql.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/365d9c14/docs/dev/table/sql.md
--
diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index b968c43..5b83e9d 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -,7 +,7 @@ COUNT(value [, value]* )
 {% endhighlight %}
   
   
-Returns the number of input rows for which value is not 
null.
+Returns the number of input rows for which value is not 
null. Use COUNT(DISTINCT value) for the number of unique values in 
the column or expression.
   
 
 
@@ -2255,7 +2255,7 @@ SUM(numeric)
 {% endhighlight %}
   
   
-Returns the sum of numeric across all input values.
+Returns the sum of numeric across all input values. Use 
SUM(DISTINCT value) for the sum of unique values in the column or 
expression.
   
 
 



[2/2] flink git commit: [FLINK-9296] [table] Add documentation for DISTINCT aggregates

2018-07-24 Thread twalthr
[FLINK-9296] [table] Add documentation for DISTINCT aggregates


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/ae80215c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/ae80215c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/ae80215c

Branch: refs/heads/release-1.6
Commit: ae80215c866678450b1a4c5d904b8b8e1b86df21
Parents: 8450dab
Author: Timo Walther 
Authored: Tue Jul 24 14:38:39 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 14:51:31 2018 +0200

--
 docs/dev/table/sql.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/ae80215c/docs/dev/table/sql.md
--
diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index b968c43..5b83e9d 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -,7 +,7 @@ COUNT(value [, value]* )
 {% endhighlight %}
   
   
-Returns the number of input rows for which value is not 
null.
+Returns the number of input rows for which value is not 
null. Use COUNT(DISTINCT value) for the number of unique values in 
the column or expression.
   
 
 
@@ -2255,7 +2255,7 @@ SUM(numeric)
 {% endhighlight %}
   
   
-Returns the sum of numeric across all input values.
+Returns the sum of numeric across all input values. Use 
SUM(DISTINCT value) for the sum of unique values in the column or 
expression.
   
 
 



[1/2] flink git commit: [FLINK-9296] [table] Add support for non-windowed DISTINCT aggregates.

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 c6a3efef1 -> ae80215c8


[FLINK-9296] [table] Add support for non-windowed DISTINCT aggregates.

This closes #6393.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8450dab8
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8450dab8
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8450dab8

Branch: refs/heads/release-1.6
Commit: 8450dab8b586b8b4aaf06e09a3842ab2a972cd18
Parents: c6a3efe
Author: Fabian Hueske 
Authored: Mon Jul 23 16:02:58 2018 +0200
Committer: Timo Walther 
Committed: Tue Jul 24 14:51:24 2018 +0200

--
 docs/dev/table/sql.md   |  1 -
 .../DataStreamGroupAggregateRule.scala  |  8 +-
 .../api/stream/sql/DistinctAggregateTest.scala  | 22 +
 .../table/runtime/stream/sql/SqlITCase.scala| 26 
 4 files changed, 49 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8450dab8/docs/dev/table/sql.md
--
diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index 57e0ba5..b968c43 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -2645,7 +2645,6 @@ The following functions are not supported yet:
 
 - Binary string operators and functions
 - System functions
-- Distinct aggregate functions like COUNT DISTINCT
 
 {% top %}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/8450dab8/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
--
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
index 0b8e411..e99a86e 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/datastream/DataStreamGroupAggregateRule.scala
@@ -43,19 +43,13 @@ class DataStreamGroupAggregateRule
   override def matches(call: RelOptRuleCall): Boolean = {
 val agg: FlinkLogicalAggregate = 
call.rel(0).asInstanceOf[FlinkLogicalAggregate]
 
-// check if we have distinct aggregates
-val distinctAggs = agg.getAggCallList.exists(_.isDistinct)
-if (distinctAggs) {
-  throw TableException("DISTINCT aggregates are currently not supported.")
-}
-
 // check if we have grouping sets
 val groupSets = agg.getGroupSets.size() != 1 || agg.getGroupSets.get(0) != 
agg.getGroupSet
 if (groupSets || agg.indicator) {
   throw TableException("GROUPING SETS are currently not supported.")
 }
 
-!distinctAggs && !groupSets && !agg.indicator
+!groupSets && !agg.indicator
   }
 
   override def convert(rel: RelNode): RelNode = {

http://git-wip-us.apache.org/repos/asf/flink/blob/8450dab8/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
--
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
index 1ce63c6..95aa3ea 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/DistinctAggregateTest.scala
@@ -72,6 +72,28 @@ class DistinctAggregateTest extends TableTestBase {
   }
 
   @Test
+  def testDistinctAggregate(): Unit = {
+val sqlQuery = "SELECT " +
+  "  c, SUM(DISTINCT a), SUM(a), COUNT(DISTINCT b) " +
+  "FROM MyTable " +
+  "GROUP BY c "
+
+val expected =
+  unaryNode(
+"DataStreamGroupAggregate",
+unaryNode(
+  "DataStreamCalc",
+  streamTableNode(0),
+  term("select", "c", "a", "b")
+),
+term("groupBy", "c"),
+term("select", "c",
+  "SUM(DISTINCT a) AS EXPR$1", "SUM(a) AS EXPR$2", "COUNT(DISTINCT b) 
AS EXPR$3")
+  )
+streamUtil.verifySql(sqlQuery, expected)
+  }
+
+  @Test
   def testDistinctAggregateOnTumbleWindow(): Unit = {
 val sqlQuery = "SELECT COUNT(DISTINCT a), " +
   "  SUM(a) " +

http://git-wip-us.apache.org/repos/asf/flink/blob/8450dab8/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SqlITCase.scala
-

flink git commit: [FLINK-9934] [table] Fix invalid field mapping by Kafka table source factory

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/master 3f2232668 -> 378cbb7c2


[FLINK-9934] [table] Fix invalid field mapping by Kafka table source factory

According to the DefinedFieldMapping interface, the field mapping can also contain
the input fields. However, the Kafka table source factory was calling
SchemaValidator#deriveFieldMapping with its own schema instead of the input 
type.

This closes #6403.
This closes #3124.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/378cbb7c
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/378cbb7c
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/378cbb7c

Branch: refs/heads/master
Commit: 378cbb7c2e580ba73f215234e7dff542c3e2bc97
Parents: 3f22326
Author: Timo Walther 
Authored: Tue Jul 24 11:40:36 2018 +0200
Committer: Timo Walther 
Committed: Wed Jul 25 08:01:07 2018 +0200

--
 .../kafka/KafkaTableSourceSinkFactoryBase.java  | 14 -
 .../KafkaJsonTableSourceFactoryTestBase.java|  5 +
 .../KafkaTableSourceSinkFactoryTestBase.java|  5 +
 .../table/descriptors/SchemaValidator.scala | 21 +---
 .../table/descriptors/SchemaValidatorTest.scala |  4 ++--
 5 files changed, 35 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/378cbb7c/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
 
b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
index 3307994..27b2e67 100644
--- 
a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
+++ 
b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
@@ -132,18 +132,20 @@ public abstract class KafkaTableSourceSinkFactoryBase 
implements
public StreamTableSource createStreamTableSource(Map properties) {
final DescriptorProperties descriptorProperties = 
getValidatedProperties(properties);
 
-   final TableSchema schema = 
descriptorProperties.getTableSchema(SCHEMA());
final String topic = 
descriptorProperties.getString(CONNECTOR_TOPIC);
+   final DeserializationSchema deserializationSchema = 
getDeserializationSchema(properties);
final StartupOptions startupOptions = 
getStartupOptions(descriptorProperties, topic);
 
return createKafkaTableSource(
-   schema,
+   descriptorProperties.getTableSchema(SCHEMA()),

SchemaValidator.deriveProctimeAttribute(descriptorProperties),

SchemaValidator.deriveRowtimeAttributes(descriptorProperties),
-   
SchemaValidator.deriveFieldMapping(descriptorProperties, Optional.of(schema)),
+   SchemaValidator.deriveFieldMapping(
+   descriptorProperties,
+   
Optional.of(deserializationSchema.getProducedType())),
topic,
getKafkaProperties(descriptorProperties),
-   getDeserializationSchema(properties),
+   deserializationSchema,
startupOptions.startupMode,
startupOptions.specificOffsets);
}
@@ -318,7 +320,9 @@ public abstract class KafkaTableSourceSinkFactoryBase 
implements
}
 
private boolean checkForCustomFieldMapping(DescriptorProperties 
descriptorProperties, TableSchema schema) {
-   final Map fieldMapping = 
SchemaValidator.deriveFieldMapping(descriptorProperties, Optional.of(schema));
+   final Map fieldMapping = 
SchemaValidator.deriveFieldMapping(
+   descriptorProperties,
+   Optional.of(schema.toRowType())); // until FLINK-9870 
is fixed we assume that the table schema is the output type
return fieldMapping.size() != schema.getColumnNames().length ||
!fieldMapping.entrySet().stream().allMatch(mapping -> 
mapping.getKey().equals(mapping.getValue()));
}

http://git-wip-us.apache.org/repos/asf/flink/blob/378cbb7c/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java
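
For context, a minimal sketch of the DefinedFieldMapping contract the fix relies on (the field names below are made up for illustration and are not part of the commit): the mapping maps table schema fields to fields of the physical input type, and may also repeat input fields that keep their name.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.sources.DefinedFieldMapping;

public class ExampleFieldMapping implements DefinedFieldMapping {

	@Override
	public Map<String, String> getFieldMapping() {
		Map<String, String> mapping = new HashMap<>();
		// table schema field -> field of the input (produced) type
		mapping.put("user_name", "name");
		mapping.put("ts", "timestamp");
		// input fields may also be contained as identity mappings
		mapping.put("amount", "amount");
		return mapping;
	}
}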

flink git commit: [FLINK-9934] [table] Fix invalid field mapping by Kafka table source factory

2018-07-24 Thread twalthr
Repository: flink
Updated Branches:
  refs/heads/release-1.6 d180d599a -> d7b80b0aa


[FLINK-9934] [table] Fix invalid field mapping by Kafka table source factory

According to the DefinedFieldMapping interface, the field mapping can also contain
the input fields. However, the Kafka table source factory was calling
SchemaValidator#deriveFieldMapping with its own schema instead of the input 
type.

This closes #6403.
This closes #3124.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/d7b80b0a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/d7b80b0a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/d7b80b0a

Branch: refs/heads/release-1.6
Commit: d7b80b0aa0ae6451da46b910b07b17415cb2530a
Parents: d180d59
Author: Timo Walther 
Authored: Tue Jul 24 11:40:36 2018 +0200
Committer: Timo Walther 
Committed: Wed Jul 25 08:03:28 2018 +0200

--
 .../kafka/KafkaTableSourceSinkFactoryBase.java  | 14 -
 .../KafkaJsonTableSourceFactoryTestBase.java|  5 +
 .../KafkaTableSourceSinkFactoryTestBase.java|  5 +
 .../table/descriptors/SchemaValidator.scala | 21 +---
 .../table/descriptors/SchemaValidatorTest.scala |  4 ++--
 5 files changed, 35 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/d7b80b0a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
--
diff --git 
a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
 
b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
index 3307994..27b2e67 100644
--- 
a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
+++ 
b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
@@ -132,18 +132,20 @@ public abstract class KafkaTableSourceSinkFactoryBase 
implements
public StreamTableSource createStreamTableSource(Map properties) {
final DescriptorProperties descriptorProperties = 
getValidatedProperties(properties);
 
-   final TableSchema schema = 
descriptorProperties.getTableSchema(SCHEMA());
final String topic = 
descriptorProperties.getString(CONNECTOR_TOPIC);
+   final DeserializationSchema deserializationSchema = 
getDeserializationSchema(properties);
final StartupOptions startupOptions = 
getStartupOptions(descriptorProperties, topic);
 
return createKafkaTableSource(
-   schema,
+   descriptorProperties.getTableSchema(SCHEMA()),

SchemaValidator.deriveProctimeAttribute(descriptorProperties),

SchemaValidator.deriveRowtimeAttributes(descriptorProperties),
-   
SchemaValidator.deriveFieldMapping(descriptorProperties, Optional.of(schema)),
+   SchemaValidator.deriveFieldMapping(
+   descriptorProperties,
+   
Optional.of(deserializationSchema.getProducedType())),
topic,
getKafkaProperties(descriptorProperties),
-   getDeserializationSchema(properties),
+   deserializationSchema,
startupOptions.startupMode,
startupOptions.specificOffsets);
}
@@ -318,7 +320,9 @@ public abstract class KafkaTableSourceSinkFactoryBase 
implements
}
 
private boolean checkForCustomFieldMapping(DescriptorProperties 
descriptorProperties, TableSchema schema) {
-   final Map fieldMapping = 
SchemaValidator.deriveFieldMapping(descriptorProperties, Optional.of(schema));
+   final Map fieldMapping = 
SchemaValidator.deriveFieldMapping(
+   descriptorProperties,
+   Optional.of(schema.toRowType())); // until FLINK-9870 
is fixed we assume that the table schema is the output type
return fieldMapping.size() != schema.getColumnNames().length ||
!fieldMapping.entrySet().stream().allMatch(mapping -> 
mapping.getKey().equals(mapping.getValue()));
}

http://git-wip-us.apache.org/repos/asf/flink/blob/d7b80b0a/flink-connectors/flink-connector-kafka-base/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaJsonTableSourceFactoryTestBase.java
--

[flink] branch master updated: [hotfix] [table] Remove wrong ISO references for timestamp extraction

2018-07-26 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c503c21  [hotfix] [table] Remove wrong ISO references for timestamp 
extraction
c503c21 is described below

commit c503c214fa720eeeba21bce5bbcecb2d30655f7c
Author: Timo Walther 
AuthorDate: Thu Jul 26 09:07:11 2018 +0200

[hotfix] [table] Remove wrong ISO references for timestamp extraction

The docs and timestamp extractors mention ISO timestamps; however, according
to ISO 8601, timestamps have a different format.
---
 docs/dev/table/sourceSinks.md  |  3 +--
 .../flink/table/sources/tsextractors/ExistingField.scala   | 10 ++
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/dev/table/sourceSinks.md b/docs/dev/table/sourceSinks.md
index 4c5f2e2..3c2152b 100644
--- a/docs/dev/table/sourceSinks.md
+++ b/docs/dev/table/sourceSinks.md
@@ -460,8 +460,7 @@ val source: KafkaTableSource = 
Kafka010JsonTableSource.builder()
 Flink provides `TimestampExtractor` implementations for common use cases.
 The following `TimestampExtractor` implementations are currently available:
 
-* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from 
an existing `LONG` or `SQL_TIMESTAMP`, or ISO date formatted `STRING` field.
-  * One example of ISO date format would be '2018-05-28 12:34:56.000'.
+* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from 
an existing `LONG`, `SQL_TIMESTAMP`, or timestamp formatted `STRING` field. One 
example of such a string would be '2018-05-28 12:34:56.000'.
 * `StreamRecordTimestamp()`: Extracts the value of a rowtime attribute from 
the timestamp of the `DataStream` `StreamRecord`. Note, this 
`TimestampExtractor` is not available for batch table sources.
 
 A custom `TimestampExtractor` can be defined by implementing the corresponding 
interface.
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
index 9b091ee..979f869 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
@@ -23,8 +23,9 @@ import org.apache.flink.table.api.{Types, ValidationException}
 import org.apache.flink.table.expressions.{Cast, Expression, 
ResolvedFieldReference}
 
 /**
-  * Converts an existing [[Long]] or [[java.sql.Timestamp]], or
-  * ISO date formatted [[java.lang.String]] field into a rowtime attribute.
+  * Converts an existing [[Long]], [[java.sql.Timestamp]], or
+  * timestamp formatted [[java.lang.String]] field (e.g., "2018-05-28 
12:34:56.000") into
+  * a rowtime attribute.
   *
   * @param field The field to convert into a rowtime attribute.
   */
@@ -47,8 +48,9 @@ final class ExistingField(val field: String) extends 
TimestampExtractor {
   }
 
   /**
-* Returns an [[Expression]] that casts a [[Long]] or 
[[java.sql.Timestamp]], or
-* ISO date formatted [[java.lang.String]] field into a rowtime attribute.
+* Returns an [[Expression]] that casts a [[Long]], [[java.sql.Timestamp]], 
or
+* timestamp formatted [[java.lang.String]] field (e.g., "2018-05-28 
12:34:56.000")
+* into a rowtime attribute.
 */
   override def getExpression(fieldAccesses: Array[ResolvedFieldReference]): 
Expression = {
 val fieldAccess: Expression = fieldAccesses(0)
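
For illustration only, a minimal sketch of how such an extractor is typically wired up when defining a rowtime attribute (the attribute and field names, as well as the 5 second watermark bound, are made up and not part of the commit):

import org.apache.flink.table.sources.RowtimeAttributeDescriptor;
import org.apache.flink.table.sources.tsextractors.ExistingField;
import org.apache.flink.table.sources.wmstrategies.BoundedOutOfOrderTimestamps;

public class RowtimeFromExistingField {

	// rowtime attribute "ts" derived from the existing input field "eventTime"
	// (a Long, java.sql.Timestamp, or timestamp formatted String such as
	// "2018-05-28 12:34:56.000"), with a 5 second out-of-order watermark bound
	public static RowtimeAttributeDescriptor tsRowtime() {
		return new RowtimeAttributeDescriptor(
			"ts",
			new ExistingField("eventTime"),
			new BoundedOutOfOrderTimestamps(5000L));
	}
}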



[flink] branch release-1.6 updated: [hotfix] [table] Remove wrong ISO references for timestamp extraction

2018-07-26 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 20802bd  [hotfix] [table] Remove wrong ISO references for timestamp 
extraction
20802bd is described below

commit 20802bd0eb0d537cd3dd5913ccef4c7584e798eb
Author: Timo Walther 
AuthorDate: Thu Jul 26 09:07:11 2018 +0200

[hotfix] [table] Remove wrong ISO references for timestamp extraction

The docs and timestamp extractors mention ISO timestamps; however, according
to ISO 8601, timestamps have a different format.
---
 docs/dev/table/sourceSinks.md  |  3 +--
 .../flink/table/sources/tsextractors/ExistingField.scala   | 10 ++
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/dev/table/sourceSinks.md b/docs/dev/table/sourceSinks.md
index 4c5f2e2..3c2152b 100644
--- a/docs/dev/table/sourceSinks.md
+++ b/docs/dev/table/sourceSinks.md
@@ -460,8 +460,7 @@ val source: KafkaTableSource = 
Kafka010JsonTableSource.builder()
 Flink provides `TimestampExtractor` implementations for common use cases.
 The following `TimestampExtractor` implementations are currently available:
 
-* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from 
an existing `LONG` or `SQL_TIMESTAMP`, or ISO date formatted `STRING` field.
-  * One example of ISO date format would be '2018-05-28 12:34:56.000'.
+* `ExistingField(fieldName)`: Extracts the value of a rowtime attribute from 
an existing `LONG`, `SQL_TIMESTAMP`, or timestamp formatted `STRING` field. One 
example of such a string would be '2018-05-28 12:34:56.000'.
 * `StreamRecordTimestamp()`: Extracts the value of a rowtime attribute from 
the timestamp of the `DataStream` `StreamRecord`. Note, this 
`TimestampExtractor` is not available for batch table sources.
 
 A custom `TimestampExtractor` can be defined by implementing the corresponding 
interface.
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
index 9b091ee..979f869 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/tsextractors/ExistingField.scala
@@ -23,8 +23,9 @@ import org.apache.flink.table.api.{Types, ValidationException}
 import org.apache.flink.table.expressions.{Cast, Expression, 
ResolvedFieldReference}
 
 /**
-  * Converts an existing [[Long]] or [[java.sql.Timestamp]], or
-  * ISO date formatted [[java.lang.String]] field into a rowtime attribute.
+  * Converts an existing [[Long]], [[java.sql.Timestamp]], or
+  * timestamp formatted [[java.lang.String]] field (e.g., "2018-05-28 
12:34:56.000") into
+  * a rowtime attribute.
   *
   * @param field The field to convert into a rowtime attribute.
   */
@@ -47,8 +48,9 @@ final class ExistingField(val field: String) extends 
TimestampExtractor {
   }
 
   /**
-* Returns an [[Expression]] that casts a [[Long]] or 
[[java.sql.Timestamp]], or
-* ISO date formatted [[java.lang.String]] field into a rowtime attribute.
+* Returns an [[Expression]] that casts a [[Long]], [[java.sql.Timestamp]], 
or
+* timestamp formatted [[java.lang.String]] field (e.g., "2018-05-28 
12:34:56.000")
+* into a rowtime attribute.
 */
   override def getExpression(fieldAccesses: Array[ResolvedFieldReference]): 
Expression = {
 val fieldAccess: Expression = fieldAccesses(0)



[flink] branch master updated: [FLINK-9790] [docs] Add documentation for UDFs in SQL Client

2018-07-31 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new af5bbfc  [FLINK-9790] [docs] Add documentation for UDFs in SQL Client
af5bbfc is described below

commit af5bbfcf7f381bc96cf960e0f1da847f0c7e08c6
Author: Xingcan Cui 
AuthorDate: Wed Jul 18 00:22:26 2018 +0800

[FLINK-9790] [docs] Add documentation for UDFs in SQL Client

This closes #6356.
---
 docs/dev/table/sqlClient.md | 94 +
 1 file changed, 94 insertions(+)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 24af655..9472ba7 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -181,6 +181,16 @@ tables:
   line-delimiter: "\n"
   comment-prefix: "#"
 
+# Define user-defined functions here.
+
+functions:
+  - name: myUDF
+from: class
+class: foo.bar.AggregateUDF
+constructor:
+  - 7.6
+  - false
+
 # Execution properties allow for changing the behavior of a table program.
 
 execution:
@@ -202,6 +212,7 @@ deployment:
 This configuration:
 
 - defines an environment with a table source `MyTableName` that reads from a 
CSV file,
+- defines a user-defined function `myUDF` that can be instantiated using the 
class name and two constructor parameters,
 - specifies a parallelism of 1 for queries executed in this streaming 
environment,
 - specifies an event-time characteristic, and
 - runs queries in the `table` result mode.
@@ -633,6 +644,89 @@ Make sure to download the [Apache Avro SQL 
JAR](sqlClient.html#dependencies) fil
 
 {% top %}
 
+User-defined Functions
+
+The SQL Client allows users to create custom, user-defined functions to be 
used in SQL queries. Currently, these functions are restricted to be defined 
programmatically in Java/Scala classes.
+
+In order to provide a user-defined function, you need to first implement and 
compile a function class that extends `ScalarFunction`, `AggregateFunction` or 
`TableFunction` (see [User-defined Functions]({{ site.baseurl 
}}/dev/table/udfs.html)). One or more functions can then be packaged into a 
dependency JAR for the SQL Client.
+
+All functions must be declared in an environment file before being called. For 
each item in the list of `functions`, one must specify
+
+- a `name` under which the function is registered,
+- the source of the function using `from` (restricted to be `class` for now),
+- the `class` which indicates the fully qualified class name of the function 
and an optional list of `constructor` parameters for instantiation.
+
+{% highlight yaml %}
+functions:
+  - name: ...   # required: name of the function
+from: class # required: source of the function (can only be 
"class" for now)
+class: ...  # required: fully qualified class name of the 
function
+constructor:# optional: constructor parameters of the function 
class
+  - ... # optional: a literal parameter with implicit type
+  - class: ...  # optional: full class name of the parameter
+constructor:# optional: constructor parameters of the 
parameter's class
+  - type: ...   # optional: type of the literal parameter
+value: ...  # optional: value of the literal parameter
+{% endhighlight %}
+
+Make sure that the order and types of the specified parameters strictly match 
one of the constructors of your function class.
+
+### Constructor Parameters
+
+Depending on the user-defined function, it might be necessary to parameterize 
the implementation before using it in SQL statements.
+
+As shown in the example before, when declaring a user-defined function, a 
class can be configured using constructor parameters in one of the following 
three ways:
+
+**A literal value with implicit type:** The SQL Client will automatically 
derive the type according to the literal value itself. Currently, only values 
of `BOOLEAN`, `INT`, `DOUBLE` and `VARCHAR` are supported here.
+If the automatic derivation does not work as expected (e.g., you need a 
VARCHAR `false`), use explicit types instead.
+
+{% highlight yaml %}
+- true # -> BOOLEAN (case sensitive)
+- 42   # -> INT
+- 1234.222 # -> DOUBLE
+- foo  # -> VARCHAR
+{% endhighlight %}
+
+**A literal value with explicit type:** Explicitly declare the parameter with 
`type` and `value` properties for type-safety.
+
+{% highlight yaml %}
+- type: DECIMAL
+  value: 1
+{% endhighlight %}
+
+The table below illustrates the supported Java parameter types and the 
corresponding SQL type strings.
+
+| Java type   |  SQL type |
+| :-- | : |
+| `java.math.BigDecimal`  | `DECIMAL` 
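
For illustration only, a minimal function class whose constructor lines up with the two literal parameters from the environment file example earlier in this commit's docs (7.6 -> DOUBLE, false -> BOOLEAN). A simple ScalarFunction is used here instead of the AggregateFunction referenced by the example, and all names are made up:

import org.apache.flink.table.functions.ScalarFunction;

public class ThresholdUDF extends ScalarFunction {

	private final double threshold;
	private final boolean strict;

	// matches the declared constructor parameters "7.6" (DOUBLE) and "false" (BOOLEAN)
	public ThresholdUDF(double threshold, boolean strict) {
		this.threshold = threshold;
		this.strict = strict;
	}

	// evaluation method invoked for every record
	public boolean eval(double value) {
		return strict ? value > threshold : value >= threshold;
	}
}

Packaged into a dependency JAR and declared under "functions" in the environment file, such a class can then be called by its registered name in SQL queries.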

[flink] branch release-1.6 updated: [FLINK-9790] [docs] Add documentation for UDFs in SQL Client

2018-07-31 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 412f614  [FLINK-9790] [docs] Add documentation for UDFs in SQL Client
412f614 is described below

commit 412f6146e6aa1072d5f692dd4c86b685714ae0ff
Author: Xingcan Cui 
AuthorDate: Wed Jul 18 00:22:26 2018 +0800

[FLINK-9790] [docs] Add documentation for UDFs in SQL Client

This closes #6356.
---
 docs/dev/table/sqlClient.md | 94 +
 1 file changed, 94 insertions(+)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 24af655..9472ba7 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -181,6 +181,16 @@ tables:
   line-delimiter: "\n"
   comment-prefix: "#"
 
+# Define user-defined functions here.
+
+functions:
+  - name: myUDF
+from: class
+class: foo.bar.AggregateUDF
+constructor:
+  - 7.6
+  - false
+
 # Execution properties allow for changing the behavior of a table program.
 
 execution:
@@ -202,6 +212,7 @@ deployment:
 This configuration:
 
 - defines an environment with a table source `MyTableName` that reads from a 
CSV file,
+- defines a user-defined function `myUDF` that can be instantiated using the 
class name and two constructor parameters,
 - specifies a parallelism of 1 for queries executed in this streaming 
environment,
 - specifies an event-time characteristic, and
 - runs queries in the `table` result mode.
@@ -633,6 +644,89 @@ Make sure to download the [Apache Avro SQL 
JAR](sqlClient.html#dependencies) fil
 
 {% top %}
 
+User-defined Functions
+
+The SQL Client allows users to create custom, user-defined functions to be 
used in SQL queries. Currently, these functions are restricted to be defined 
programmatically in Java/Scala classes.
+
+In order to provide a user-defined function, you need to first implement and 
compile a function class that extends `ScalarFunction`, `AggregateFunction` or 
`TableFunction` (see [User-defined Functions]({{ site.baseurl 
}}/dev/table/udfs.html)). One or more functions can then be packaged into a 
dependency JAR for the SQL Client.
+
+All functions must be declared in an environment file before being called. For 
each item in the list of `functions`, one must specify
+
+- a `name` under which the function is registered,
+- the source of the function using `from` (restricted to be `class` for now),
+- the `class` which indicates the fully qualified class name of the function 
and an optional list of `constructor` parameters for instantiation.
+
+{% highlight yaml %}
+functions:
+  - name: ...   # required: name of the function
+from: class # required: source of the function (can only be 
"class" for now)
+class: ...  # required: fully qualified class name of the 
function
+constructor:# optional: constructor parameters of the function 
class
+  - ... # optional: a literal parameter with implicit type
+  - class: ...  # optional: full class name of the parameter
+constructor:# optional: constructor parameters of the 
parameter's class
+  - type: ...   # optional: type of the literal parameter
+value: ...  # optional: value of the literal parameter
+{% endhighlight %}
+
+Make sure that the order and types of the specified parameters strictly match 
one of the constructors of your function class.
+
+### Constructor Parameters
+
+Depending on the user-defined function, it might be necessary to parameterize 
the implementation before using it in SQL statements.
+
+As shown in the example before, when declaring a user-defined function, a 
class can be configured using constructor parameters in one of the following 
three ways:
+
+**A literal value with implicit type:** The SQL Client will automatically 
derive the type according to the literal value itself. Currently, only values 
of `BOOLEAN`, `INT`, `DOUBLE` and `VARCHAR` are supported here.
+If the automatic derivation does not work as expected (e.g., you need a 
VARCHAR `false`), use explicit types instead.
+
+{% highlight yaml %}
+- true # -> BOOLEAN (case sensitive)
+- 42   # -> INT
+- 1234.222 # -> DOUBLE
+- foo  # -> VARCHAR
+{% endhighlight %}
+
+**A literal value with explicit type:** Explicitly declare the parameter with 
`type` and `value` properties for type-safety.
+
+{% highlight yaml %}
+- type: DECIMAL
+  value: 1
+{% endhighlight %}
+
+The table below illustrates the supported Java parameter types and the 
corresponding SQL type strings.
+
+| Java type   |  SQL type |
+| :-- | : |
+| `java.math.BigDecima

[flink] branch master updated: [FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS

2018-08-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 891d3a3  [FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS
891d3a3 is described below

commit 891d3a36fca8569e61956ba5c2addbe9ac15e2f4
Author: Florian Schmidt 
AuthorDate: Mon Jul 30 11:36:09 2018 +0200

[FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS

This fixes the set_ssl_conf utility function under macOS. The previous
version was using `hostname -I` regardless of the OS, but the -I option
is not available on the BSD version of hostname.

This is now fixed by checking for all IPv4 addresses from ifconfig if the
OS is macOS and formatting the output to be identical to `hostname -I`.

Additionally, the filtering of the output is removed so that all
IP addresses are appended to the SANSTRING instead of just one.

This closes #9874.
---
 flink-end-to-end-tests/test-scripts/common.sh | 26 +-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/flink-end-to-end-tests/test-scripts/common.sh 
b/flink-end-to-end-tests/test-scripts/common.sh
index 621db11..0ab2ffa 100644
--- a/flink-end-to-end-tests/test-scripts/common.sh
+++ b/flink-end-to-end-tests/test-scripts/common.sh
@@ -146,6 +146,27 @@ function create_ha_config() {
 EOL
 }
 
+function get_node_ip {
+local ip_addr
+
+if [[ ${OS_TYPE} == "linux" ]]; then
+ip_addr=$(hostname -I)
+elif [[ ${OS_TYPE} == "mac" ]]; then
+ip_addr=$(
+ifconfig |
+grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | # grep IPv4 addresses only
+grep -v 127.0.0.1 | # do not use 127.0.0.1 (to 
be consistent with hostname -I)
+awk '{ print $2 }' |# extract ip from row
+paste -sd " " - # combine everything to 
one line
+)
+else
+echo "Warning: Unsupported OS_TYPE '${OS_TYPE}' for 'get_node_ip'. 
Falling back to 'hostname -I' (linux)"
+ip_addr=$(hostname -I)
+fi
+
+echo ${ip_addr}
+}
+
 function set_conf_ssl {
 
 # clean up the dir that will be used for SSL certificates and trust stores
@@ -154,12 +175,15 @@ function set_conf_ssl {
rm -rf "${TEST_DATA_DIR}/ssl"
 fi
 mkdir -p "${TEST_DATA_DIR}/ssl"
+
 NODENAME=`hostname -f`
 SANSTRING="dns:${NODENAME}"
-for NODEIP in `hostname -I | cut -d' ' -f1` ; do
+for NODEIP in $(get_node_ip) ; do
 SANSTRING="${SANSTRING},ip:${NODEIP}"
 done
 
+echo "Using SAN ${SANSTRING}"
+
 # create certificates
 keytool -genkeypair -alias ca -keystore "${TEST_DATA_DIR}/ssl/ca.keystore" 
-dname "CN=Sample CA" -storepass password -keypass password -keyalg RSA -ext 
bc=ca:true
 keytool -keystore "${TEST_DATA_DIR}/ssl/ca.keystore" -storepass password 
-alias ca -exportcert > "${TEST_DATA_DIR}/ssl/ca.cer"



[flink] branch release-1.6 updated: [FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS

2018-08-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new ed20d4d  [FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS
ed20d4d is described below

commit ed20d4dd76b1107bdd0987b3e6439a52c8aa6535
Author: Florian Schmidt 
AuthorDate: Mon Jul 30 11:36:09 2018 +0200

[FLINK-9874][E2E Tests] Fix set_ssl_conf for macOS

This fixes the set_ssl_conf utility function under macOS. The previous
version was using `hostname -I` regardless of the OS, but the -I option
is not available on the BSD version of hostname.

This is now fixed by checking for all IPv4 addresses from ifconfig if the
OS is macOS and formatting the output to be identical to `hostname -I`.

Additionally, the filtering of the output is removed so that all
IP addresses are appended to the SANSTRING instead of just one.

This closes #9874.
---
 flink-end-to-end-tests/test-scripts/common.sh | 26 +-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/flink-end-to-end-tests/test-scripts/common.sh 
b/flink-end-to-end-tests/test-scripts/common.sh
index 621db11..0ab2ffa 100644
--- a/flink-end-to-end-tests/test-scripts/common.sh
+++ b/flink-end-to-end-tests/test-scripts/common.sh
@@ -146,6 +146,27 @@ function create_ha_config() {
 EOL
 }
 
+function get_node_ip {
+local ip_addr
+
+if [[ ${OS_TYPE} == "linux" ]]; then
+ip_addr=$(hostname -I)
+elif [[ ${OS_TYPE} == "mac" ]]; then
+ip_addr=$(
+ifconfig |
+grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | # grep IPv4 addresses only
+grep -v 127.0.0.1 | # do not use 127.0.0.1 (to 
be consistent with hostname -I)
+awk '{ print $2 }' |# extract ip from row
+paste -sd " " - # combine everything to 
one line
+)
+else
+echo "Warning: Unsupported OS_TYPE '${OS_TYPE}' for 'get_node_ip'. 
Falling back to 'hostname -I' (linux)"
+ip_addr=$(hostname -I)
+fi
+
+echo ${ip_addr}
+}
+
 function set_conf_ssl {
 
 # clean up the dir that will be used for SSL certificates and trust stores
@@ -154,12 +175,15 @@ function set_conf_ssl {
rm -rf "${TEST_DATA_DIR}/ssl"
 fi
 mkdir -p "${TEST_DATA_DIR}/ssl"
+
 NODENAME=`hostname -f`
 SANSTRING="dns:${NODENAME}"
-for NODEIP in `hostname -I | cut -d' ' -f1` ; do
+for NODEIP in $(get_node_ip) ; do
 SANSTRING="${SANSTRING},ip:${NODEIP}"
 done
 
+echo "Using SAN ${SANSTRING}"
+
 # create certificates
 keytool -genkeypair -alias ca -keystore "${TEST_DATA_DIR}/ssl/ca.keystore" 
-dname "CN=Sample CA" -storepass password -keypass password -keyalg RSA -ext 
bc=ca:true
 keytool -keystore "${TEST_DATA_DIR}/ssl/ca.keystore" -storepass password 
-alias ca -exportcert > "${TEST_DATA_DIR}/ssl/ca.cer"



[flink] branch master updated: [FLINK-9979] [table] Support a FlinkKafkaPartitioner for Kafka table sink factory

2018-08-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 628b71d  [FLINK-9979] [table] Support a FlinkKafkaPartitioner for 
Kafka table sink factory
628b71d is described below

commit 628b71dfabe72a99eb6d54994fc870abed1f0268
Author: Timo Walther 
AuthorDate: Fri Jul 27 14:03:50 2018 +0200

[FLINK-9979] [table] Support a FlinkKafkaPartitioner for Kafka table sink 
factory

Adds the possibility to add a FlinkKafkaPartitioner to a Kafka table sink
factory. It provides shortcuts for the built-in "fixed" and "round-robin" partitioning.

This closes #6440.
---
 .../connectors/kafka/Kafka010JsonTableSink.java| 14 +++-
 .../connectors/kafka/Kafka010TableSink.java|  7 +-
 .../kafka/Kafka010TableSourceSinkFactory.java  |  2 +-
 .../kafka/Kafka010JsonTableSinkTest.java   |  5 +-
 .../kafka/Kafka010TableSourceSinkFactoryTest.java  |  2 +-
 .../connectors/kafka/Kafka011TableSink.java|  6 +-
 .../kafka/Kafka011TableSourceSinkFactory.java  |  2 +-
 .../kafka/Kafka011TableSourceSinkFactoryTest.java  |  2 +-
 .../connectors/kafka/Kafka08JsonTableSink.java | 14 +++-
 .../connectors/kafka/Kafka08TableSink.java |  7 +-
 .../kafka/Kafka08TableSourceSinkFactory.java   |  2 +-
 .../connectors/kafka/Kafka08JsonTableSinkTest.java |  5 +-
 .../kafka/Kafka08TableSourceSinkFactoryTest.java   |  2 +-
 .../connectors/kafka/Kafka09JsonTableSink.java | 14 +++-
 .../connectors/kafka/Kafka09TableSink.java |  7 +-
 .../kafka/Kafka09TableSourceSinkFactory.java   |  2 +-
 .../connectors/kafka/Kafka09JsonTableSinkTest.java |  5 +-
 .../kafka/Kafka09TableSourceSinkFactoryTest.java   |  2 +-
 .../streaming/connectors/kafka/KafkaTableSink.java | 10 +--
 .../kafka/KafkaTableSourceSinkFactoryBase.java | 33 +++--
 .../org/apache/flink/table/descriptors/Kafka.java  | 78 ++
 .../flink/table/descriptors/KafkaValidator.java| 33 -
 .../connectors/kafka/KafkaTableSinkTestBase.java   |  5 +-
 .../kafka/KafkaTableSourceSinkFactoryTestBase.java |  6 +-
 .../apache/flink/table/descriptors/KafkaTest.java  |  7 +-
 25 files changed, 223 insertions(+), 49 deletions(-)

diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index 2ad3142..8471908 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -24,6 +24,7 @@ import 
org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartiti
 import org.apache.flink.table.descriptors.ConnectorDescriptor;
 import org.apache.flink.types.Row;
 
+import java.util.Optional;
 import java.util.Properties;
 
 /**
@@ -73,16 +74,23 @@ public class Kafka010JsonTableSink extends 
KafkaJsonTableSink {
}
 
@Override
-   protected FlinkKafkaProducerBase createKafkaProducer(String topic, 
Properties properties, SerializationSchema serializationSchema, 
FlinkKafkaPartitioner partitioner) {
+   protected FlinkKafkaProducerBase createKafkaProducer(
+   String topic,
+   Properties properties,
+   SerializationSchema serializationSchema,
+   Optional> partitioner) {
return new FlinkKafkaProducer010<>(
topic,
serializationSchema,
properties,
-   partitioner);
+   partitioner.orElse(new FlinkFixedPartitioner<>()));
}
 
@Override
protected Kafka010JsonTableSink createCopy() {
-   return new Kafka010JsonTableSink(topic, properties, 
partitioner);
+   return new Kafka010JsonTableSink(
+   topic,
+   properties,
+   partitioner.orElse(new FlinkFixedPartitioner<>()));
}
 }
diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
index a8c6553..1d408b8 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connect
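
For illustration only, a minimal custom partitioner of the kind that can now be handed to the Kafka table sink factory in addition to the built-in "fixed" and "round-robin" shortcuts (the routing logic below is made up and not part of the commit):

import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.types.Row;

public class FirstFieldPartitioner extends FlinkKafkaPartitioner<Row> {

	@Override
	public int partition(Row record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
		// route by the hash of the first field; null fields go to partition 0
		Object firstField = record.getField(0);
		int hash = firstField == null ? 0 : firstField.hashCode();
		return partitions[Math.abs(hash % partitions.length)];
	}
}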

[flink] branch release-1.6 updated: [FLINK-9979] [table] Support a FlinkKafkaPartitioner for Kafka table sink factory

2018-08-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 1339fbb  [FLINK-9979] [table] Support a FlinkKafkaPartitioner for 
Kafka table sink factory
1339fbb is described below

commit 1339fbb8f08b93f1a77b39ea17e9ed12834250b4
Author: Timo Walther 
AuthorDate: Fri Jul 27 14:03:50 2018 +0200

[FLINK-9979] [table] Support a FlinkKafkaPartitioner for Kafka table sink 
factory

Adds the possibility to add a FlinkKafkaPartitioner to a Kafka table sink
factory. It provides shortcuts for the built-in "fixed" and "round-robin" partitioning.

This closes #6440.
---
 .../connectors/kafka/Kafka010JsonTableSink.java| 14 +++-
 .../connectors/kafka/Kafka010TableSink.java|  7 +-
 .../kafka/Kafka010TableSourceSinkFactory.java  |  2 +-
 .../kafka/Kafka010JsonTableSinkTest.java   |  5 +-
 .../kafka/Kafka010TableSourceSinkFactoryTest.java  |  2 +-
 .../connectors/kafka/Kafka011TableSink.java|  6 +-
 .../kafka/Kafka011TableSourceSinkFactory.java  |  2 +-
 .../kafka/Kafka011TableSourceSinkFactoryTest.java  |  2 +-
 .../connectors/kafka/Kafka08JsonTableSink.java | 14 +++-
 .../connectors/kafka/Kafka08TableSink.java |  7 +-
 .../kafka/Kafka08TableSourceSinkFactory.java   |  2 +-
 .../connectors/kafka/Kafka08JsonTableSinkTest.java |  5 +-
 .../kafka/Kafka08TableSourceSinkFactoryTest.java   |  2 +-
 .../connectors/kafka/Kafka09JsonTableSink.java | 14 +++-
 .../connectors/kafka/Kafka09TableSink.java |  7 +-
 .../kafka/Kafka09TableSourceSinkFactory.java   |  2 +-
 .../connectors/kafka/Kafka09JsonTableSinkTest.java |  5 +-
 .../kafka/Kafka09TableSourceSinkFactoryTest.java   |  2 +-
 .../streaming/connectors/kafka/KafkaTableSink.java | 10 +--
 .../kafka/KafkaTableSourceSinkFactoryBase.java | 33 +++--
 .../org/apache/flink/table/descriptors/Kafka.java  | 78 ++
 .../flink/table/descriptors/KafkaValidator.java| 33 -
 .../connectors/kafka/KafkaTableSinkTestBase.java   |  5 +-
 .../kafka/KafkaTableSourceSinkFactoryTestBase.java |  6 +-
 .../apache/flink/table/descriptors/KafkaTest.java  |  7 +-
 25 files changed, 223 insertions(+), 49 deletions(-)

diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
index 2ad3142..8471908 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010JsonTableSink.java
@@ -24,6 +24,7 @@ import 
org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartiti
 import org.apache.flink.table.descriptors.ConnectorDescriptor;
 import org.apache.flink.types.Row;
 
+import java.util.Optional;
 import java.util.Properties;
 
 /**
@@ -73,16 +74,23 @@ public class Kafka010JsonTableSink extends 
KafkaJsonTableSink {
}
 
@Override
-   protected FlinkKafkaProducerBase createKafkaProducer(String topic, 
Properties properties, SerializationSchema serializationSchema, 
FlinkKafkaPartitioner partitioner) {
+   protected FlinkKafkaProducerBase createKafkaProducer(
+   String topic,
+   Properties properties,
+   SerializationSchema serializationSchema,
+   Optional> partitioner) {
return new FlinkKafkaProducer010<>(
topic,
serializationSchema,
properties,
-   partitioner);
+   partitioner.orElse(new FlinkFixedPartitioner<>()));
}
 
@Override
protected Kafka010JsonTableSink createCopy() {
-   return new Kafka010JsonTableSink(topic, properties, 
partitioner);
+   return new Kafka010JsonTableSink(
+   topic,
+   properties,
+   partitioner.orElse(new FlinkFixedPartitioner<>()));
}
 }
diff --git 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
index a8c6553..1d408b8 100644
--- 
a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/Kafka010TableSink.java
+++ 
b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streamin

[flink] branch master updated: [FLINK-9688] [table] Add ATAN2 SQL function support

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 295e42f  [FLINK-9688] [table] Add ATAN2 SQL function support
295e42f is described below

commit 295e42f3eeea4558fd46cc0f0890126dbb5a50c5
Author: snuyanzin 
AuthorDate: Thu Jun 28 10:05:33 2018 +0300

[FLINK-9688] [table] Add ATAN2 SQL function support

This closes #6223.
---
 docs/dev/table/sql.md  | 11 ++
 docs/dev/table/tableApi.md | 22 +++
 .../flink/table/api/scala/expressionDsl.scala  | 18 +
 .../flink/table/codegen/calls/BuiltInMethods.scala | 19 +
 .../table/codegen/calls/FunctionGenerator.scala| 12 ++
 .../flink/table/expressions/InputTypeSpec.scala|  8 +++-
 .../flink/table/expressions/mathExpressions.scala  | 20 ++
 .../table/runtime/functions/ScalarFunctions.scala  |  6 +++
 .../flink/table/validate/FunctionCatalog.scala |  2 +
 .../table/expressions/ScalarFunctionsTest.scala| 45 ++
 .../table/expressions/SqlExpressionTest.scala  |  1 +
 11 files changed, 162 insertions(+), 2 deletions(-)

diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index 366e3fd..94fd59b 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -1514,6 +1514,17 @@ COT(numeric)
 
   
 {% highlight text %}
+ATAN2(numeric, numeric)
+{% endhighlight %}
+  
+  
+Calculates the arc tangent of a given coordinate.
+  
+
+
+
+  
+{% highlight text %}
 ASIN(numeric)
 {% endhighlight %}
   
diff --git a/docs/dev/table/tableApi.md b/docs/dev/table/tableApi.md
index 6e202f1..9abe17f 100644
--- a/docs/dev/table/tableApi.md
+++ b/docs/dev/table/tableApi.md
@@ -2182,6 +2182,17 @@ NUMERIC.asin()
 
   
 {% highlight java %}
+atan2(NUMERIC, NUMERIC)
+{% endhighlight %}
+  
+  
+Calculates the arc tangent of a given coordinate.
+  
+
+
+
+  
+{% highlight java %}
 NUMERIC.acos()
 {% endhighlight %}
   
@@ -3746,6 +3757,17 @@ NUMERIC.cot()
 
   
 {% highlight scala %}
+atan2(NUMERIC, NUMERIC)
+{% endhighlight %}
+  
+  
+Calculates the arc tangent of a given coordinate.
+  
+
+
+
+  
+{% highlight scala %}
 NUMERIC.asin()
 {% endhighlight %}
   
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index 35d2167..8aa5f8a 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -1185,12 +1185,30 @@ object randInteger {
   * Returns NULL if any argument is NULL.
   */
 object concat {
+
+  /**
+* Returns the string that results from concatenating the arguments.
+* Returns NULL if any argument is NULL.
+*/
   def apply(string: Expression, strings: Expression*): Expression = {
 Concat(Seq(string) ++ strings)
   }
 }
 
 /**
+  * Calculates the arc tangent of a given coordinate.
+  */
+object atan2 {
+
+  /**
+* Calculates the arc tangent of a given coordinate.
+*/
+  def apply(y: Expression, x: Expression): Expression = {
+Atan2(y, x)
+  }
+}
+
+/**
   * Returns the string that results from concatenating the arguments and 
separator.
   * Returns NULL If the separator is NULL.
   *
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
index 0e0f709..d08334f 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
@@ -24,6 +24,14 @@ import org.apache.calcite.linq4j.tree.Types
 import org.apache.calcite.runtime.SqlFunctions
 import org.apache.flink.table.runtime.functions.ScalarFunctions
 
+/**
+  * Contains references to built-in functions.
+  *
+  * NOTE: When adding functions here. Check if Calcite provides it in
+  * [[org.apache.calcite.util.BuiltInMethod]]. The function generator supports 
Java's auto casting
+  * so we don't need the full matrix of data types for every function. Only 
[[JBigDecimal]] needs
+  * special handling.
+  */
 object BuiltInMethods {
 
   val LOG = Types.lookupMethod(classOf[ScalarFunctions], "log", 
classOf[Double])
@@ -75,6 +83,17 @@ object BuiltInMethods {
   val ATAN = Types.lookupMethod(classOf[Math], "atan", cl

[flink] branch master updated: [FLINK-9833] [e2e] Add a SQL Client end-to-end test with unified source/sink/format

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 404eda8  [FLINK-9833] [e2e] Add a SQL Client end-to-end test with 
unified source/sink/format
404eda8 is described below

commit 404eda81753788cda619abf3f42ded6ea5cf4f66
Author: Timo Walther 
AuthorDate: Fri Jul 20 16:55:48 2018 +0200

[FLINK-9833] [e2e] Add a SQL Client end-to-end test with unified 
source/sink/format

Adds a SQL Client end-to-end test with Kafka/Filesystem and Avro/JSON/CSV 
components.
It reads JSON from Kafka, uses a UDF for transformation, writes to Kafka 
Avro, reads
from Kafka Avro, and writes to Filesystem CSV again. It also tests the 
available
SQL jars for correctness.

This closes #6422.
---
 .../flink-sql-client-test/pom.xml  | 124 +
 .../table/toolbox/StringRegexReplaceFunction.java  |  31 +++
 flink-end-to-end-tests/pom.xml |   1 +
 flink-end-to-end-tests/run-nightly-tests.sh|   2 +
 .../test-scripts/test_sql_client.sh| 288 +
 5 files changed, 446 insertions(+)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
new file mode 100644
index 000..dc8be37
--- /dev/null
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -0,0 +1,124 @@
+
+http://maven.apache.org/POM/4.0.0";
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+   
+   flink-end-to-end-tests
+   org.apache.flink
+   1.7-SNAPSHOT
+   
+   4.0.0
+
+   flink-sql-client-test
+   flink-sql-client-test
+   jar
+
+   
+   
+   org.apache.flink
+   
flink-table_${scala.binary.version}
+   ${project.version}
+   provided
+   
+   
+   org.scala-lang
+   scala-compiler
+   provided
+   
+   
+
+   
+   
+   
+   
+   org.apache.maven.plugins
+   maven-shade-plugin
+   3.1.1
+   
+   
+   package
+   
+   shade
+   
+   
+   
SqlToolbox
+   
+   
+   
+   
+
+   
+   
+   org.apache.maven.plugins
+   maven-dependency-plugin
+   
+   
+   copy
+   package
+   
+   copy
+   
+   
+   
${project.build.directory}/sql-jars
+   
+   
+   
+   
org.apache.flink
+   
flink-avro
+   
${project.version}
+   
sql-jar
+   
jar
+   
+   
+   
org.apache.flink
+   
flink-json
+   
${project.version}
+   
sql-jar
+   

[flink] branch master updated: [hotfix] [e2e] Remove explicit Maven plugin version

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 1fe5e4b  [hotfix] [e2e] Remove explicit Maven plugin version
1fe5e4b is described below

commit 1fe5e4b05a05fc22103d2588c0551b74a85698a5
Author: Timo Walther 
AuthorDate: Thu Aug 2 17:19:04 2018 +0200

[hotfix] [e2e] Remove explicit Maven plugin version
---
 flink-end-to-end-tests/flink-sql-client-test/pom.xml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
index dc8be37..46bc2ba 100644
--- a/flink-end-to-end-tests/flink-sql-client-test/pom.xml
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -50,7 +50,6 @@ under the License.

org.apache.maven.plugins
maven-shade-plugin
-   3.1.1


package



[flink] branch release-1.6 updated (6006f93 -> 01a8471)

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a change to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 6006f93  [hotfix] Correct compilation error from rebase
 new 6e1ad2d  [FLINK-9833] [e2e] Add a SQL Client end-to-end test with 
unified source/sink/format
 new 01a8471  [hotfix] [e2e] Remove explicit Maven plugin version

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../flink-sql-client-test/pom.xml  | 123 +
 .../table/toolbox/StringRegexReplaceFunction.java  |  16 +-
 flink-end-to-end-tests/pom.xml |   1 +
 flink-end-to-end-tests/run-nightly-tests.sh|   2 +
 .../test-scripts/test_sql_client.sh| 288 +
 5 files changed, 420 insertions(+), 10 deletions(-)
 create mode 100644 flink-end-to-end-tests/flink-sql-client-test/pom.xml
 copy 
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/hadoop/mapred/wrapper/HadoopDummyProgressable.java
 => 
flink-end-to-end-tests/flink-sql-client-test/src/main/java/org/apache/flink/table/toolbox/StringRegexReplaceFunction.java
 (68%)
 create mode 100755 flink-end-to-end-tests/test-scripts/test_sql_client.sh



[flink] 01/02: [FLINK-9833] [e2e] Add a SQL Client end-to-end test with unified source/sink/format

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 6e1ad2d7c5dc5f2da0e18b23b21b69c1bfc5a333
Author: Timo Walther 
AuthorDate: Fri Jul 20 16:55:48 2018 +0200

[FLINK-9833] [e2e] Add a SQL Client end-to-end test with unified 
source/sink/format

Adds a SQL Client end-to-end test with Kafka/Filesystem and Avro/JSON/CSV 
components.
It reads JSON from Kafka, uses a UDF for transformation, writes to Kafka 
Avro, reads
from Kafka Avro, and writes to Filesystem CSV again. It also tests the 
available
SQL jars for correctness.

This closes #6422.
---
 .../flink-sql-client-test/pom.xml  | 124 +
 .../table/toolbox/StringRegexReplaceFunction.java  |  31 +++
 flink-end-to-end-tests/pom.xml |   1 +
 flink-end-to-end-tests/run-nightly-tests.sh|   2 +
 .../test-scripts/test_sql_client.sh| 288 +
 5 files changed, 446 insertions(+)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
new file mode 100644
index 000..d2bfe4b
--- /dev/null
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -0,0 +1,124 @@
+
+http://maven.apache.org/POM/4.0.0";
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+   
+   flink-end-to-end-tests
+   org.apache.flink
+   1.6-SNAPSHOT
+   
+   4.0.0
+
+   flink-sql-client-test
+   flink-sql-client-test
+   jar
+
+   
+   
+   org.apache.flink
+   
flink-table_${scala.binary.version}
+   ${project.version}
+   provided
+   
+   
+   org.scala-lang
+   scala-compiler
+   provided
+   
+   
+
+   
+   
+   
+   
+   org.apache.maven.plugins
+   maven-shade-plugin
+   3.1.1
+   
+   
+   package
+   
+   shade
+   
+   
+   
SqlToolbox
+   
+   
+   
+   
+
+   
+   
+   org.apache.maven.plugins
+   maven-dependency-plugin
+   
+   
+   copy
+   package
+   
+   copy
+   
+   
+   
${project.build.directory}/sql-jars
+   
+   
+   
+   
org.apache.flink
+   
flink-avro
+   
${project.version}
+   
sql-jar
+   
jar
+   
+   
+   
org.apache.flink
+   
flink-json
+   
${project.version}
+   
sql-jar
+   
jar
+   
+   
+

[flink] 02/02: [hotfix] [e2e] Remove explicit Maven plugin version

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 01a8471c03d72b94316e7286f9e39c3f4d022245
Author: Timo Walther 
AuthorDate: Thu Aug 2 17:19:04 2018 +0200

[hotfix] [e2e] Remove explicit Maven plugin version
---
 flink-end-to-end-tests/flink-sql-client-test/pom.xml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
index d2bfe4b..17a144f 100644
--- a/flink-end-to-end-tests/flink-sql-client-test/pom.xml
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -50,7 +50,6 @@ under the License.

org.apache.maven.plugins
maven-shade-plugin
-   3.1.1


package



[flink] branch release-1.6 updated: [FLINK-6846] [table] Deprecate quarter() in Table API

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new ef1fdfa  [FLINK-6846] [table] Deprecate quarter() in Table API
ef1fdfa is described below

commit ef1fdfa1d16d381d0e1e2ec4d401f21c2f9ac47b
Author: Timo Walther 
AuthorDate: Thu Aug 2 16:39:46 2018 +0200

[FLINK-6846] [table] Deprecate quarter() in Table API
---
 docs/dev/table/tableApi.md | 26 ++
 .../flink/table/api/scala/expressionDsl.scala  |  4 
 .../apache/flink/table/expressions/symbols.scala   |  1 +
 .../org/apache/flink/table/expressions/time.scala  |  1 +
 .../table/expressions/ScalarFunctionsTest.scala| 20 ++---
 5 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/docs/dev/table/tableApi.md b/docs/dev/table/tableApi.md
index 2f65145..961138f 100644
--- a/docs/dev/table/tableApi.md
+++ b/docs/dev/table/tableApi.md
@@ -2750,7 +2750,7 @@ TEMPORAL.extract(TIMEINTERVALUNIT)
 {% endhighlight %}
   
   
-Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. '2006-06-05'.toDate.extract(DAY) leads to 
5.
+Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. '2006-06-05'.toDate.extract(DAY) leads to 5 
or '2006-06-05'.toDate.extract(QUARTER) leads to 2.
   
 
 
@@ -2779,17 +2779,6 @@ TIMEPOINT.ceil(TIMEINTERVALUNIT)
 
   
 {% highlight java %}
-DATE.quarter()
-{% endhighlight %}
-  
-  
-Returns the quarter of a year from a SQL date. E.g. 
'1994-09-27'.toDate.quarter() leads to 3.
-  
-
-
-
-  
-{% highlight java %}
 temporalOverlaps(TIMEPOINT, TEMPORAL, TIMEPOINT, TEMPORAL)
 {% endhighlight %}
   
@@ -4267,7 +4256,7 @@ TEMPORAL.extract(TimeIntervalUnit)
 {% endhighlight %}
   
   
-Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. 
"2006-06-05".toDate.extract(TimeIntervalUnit.DAY) leads to 5.
+Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. 
"2006-06-05".toDate.extract(TimeIntervalUnit.DAY) leads to 5 or 
'2006-06-05'.toDate.extract(QUARTER) leads to 2.
   
 
 
@@ -4296,17 +4285,6 @@ TIMEPOINT.ceil(TimeIntervalUnit)
 
   
 {% highlight scala %}
-DATE.quarter()
-{% endhighlight %}
-  
-  
-Returns the quarter of a year from a SQL date. E.g. 
"1994-09-27".toDate.quarter() leads to 3.
-  
-
-
-
-  
-{% highlight scala %}
 temporalOverlaps(TIMEPOINT, TEMPORAL, TIMEPOINT, TEMPORAL)
 {% endhighlight %}
   
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index b0bc5b6..fe705d4 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -567,7 +567,11 @@ trait ImplicitExpressionOperations {
 * Returns the quarter of a year from a SQL date.
 *
 * e.g. "1994-09-27".toDate.quarter() leads to 3
+*
+* @deprecated This method will be used for describing an interval of 
months in future versions.
+* Use `extract(TimeIntervalUnit.QUARTER)` instead.
 */
+  @deprecated
   def quarter() = Quarter(expr)
 
   /**
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
index 4faf8d3..ec127e2 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
@@ -85,6 +85,7 @@ object TimeIntervalUnit extends TableSymbols {
   val YEAR = Value(TimeUnitRange.YEAR)
   val YEAR_TO_MONTH = Value(TimeUnitRange.YEAR_TO_MONTH)
   val MONTH = Value(TimeUnitRange.MONTH)
+  val QUARTER = Value(TimeUnitRange.QUARTER)
   val DAY = Value(TimeUnitRange.DAY)
   val DAY_TO_HOUR = Value(TimeUnitRange.DAY_TO_HOUR)
   val DAY_TO_MINUTE = Value(TimeUnitRange.DAY_TO_MINUTE)
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
index f231343..5dff774 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/tabl

[flink] branch master updated: [FLINK-6846] [table] Deprecate quarter() in Table API

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 94ca19e  [FLINK-6846] [table] Deprecate quarter() in Table API
94ca19e is described below

commit 94ca19e6195dfc44c519aeaf4c31f8c2bd2535d2
Author: Timo Walther 
AuthorDate: Thu Aug 2 16:39:46 2018 +0200

[FLINK-6846] [table] Deprecate quarter() in Table API
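
The documented replacement is extract(TimeIntervalUnit.QUARTER). A minimal sketch of the new call,
assuming an illustrative string date column and a standard streaming table environment (neither is
part of this commit):

{% highlight scala %}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.TimeIntervalUnit

object QuarterExtractExample {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)

    // Illustrative input date; not part of the commit.
    val dates = env.fromElements("1994-09-27").toTable(tableEnv, 'd)

    // quarter() is deprecated by this commit; extract(TimeIntervalUnit.QUARTER)
    // returns the quarter of the year as a long value (3 for September).
    val quarters = dates.select('d.toDate.extract(TimeIntervalUnit.QUARTER))

    quarters.toAppendStream[Long].print()
    env.execute()
  }
}
{% endhighlight %}
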
---
 docs/dev/table/tableApi.md | 26 ++
 .../flink/table/api/scala/expressionDsl.scala  |  4 
 .../apache/flink/table/expressions/symbols.scala   |  1 +
 .../org/apache/flink/table/expressions/time.scala  |  1 +
 .../table/expressions/ScalarFunctionsTest.scala| 20 ++---
 5 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/docs/dev/table/tableApi.md b/docs/dev/table/tableApi.md
index 9abe17f..bd9d286 100644
--- a/docs/dev/table/tableApi.md
+++ b/docs/dev/table/tableApi.md
@@ -2783,7 +2783,7 @@ TEMPORAL.extract(TIMEINTERVALUNIT)
 {% endhighlight %}
   
   
-Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. '2006-06-05'.toDate.extract(DAY) leads to 
5.
+Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. '2006-06-05'.toDate.extract(DAY) leads to 5 
or '2006-06-05'.toDate.extract(QUARTER) leads to 2.
   
 
 
@@ -2812,17 +2812,6 @@ TIMEPOINT.ceil(TIMEINTERVALUNIT)
 
   
 {% highlight java %}
-DATE.quarter()
-{% endhighlight %}
-  
-  
-Returns the quarter of a year from a SQL date. E.g. 
'1994-09-27'.toDate.quarter() leads to 3.
-  
-
-
-
-  
-{% highlight java %}
 temporalOverlaps(TIMEPOINT, TEMPORAL, TIMEPOINT, TEMPORAL)
 {% endhighlight %}
   
@@ -4311,7 +4300,7 @@ TEMPORAL.extract(TimeIntervalUnit)
 {% endhighlight %}
   
   
-Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. 
"2006-06-05".toDate.extract(TimeIntervalUnit.DAY) leads to 5.
+Extracts parts of a time point or time interval. Returns the part 
as a long value. E.g. 
"2006-06-05".toDate.extract(TimeIntervalUnit.DAY) leads to 5 or 
'2006-06-05'.toDate.extract(QUARTER) leads to 2.
   
 
 
@@ -4340,17 +4329,6 @@ TIMEPOINT.ceil(TimeIntervalUnit)
 
   
 {% highlight scala %}
-DATE.quarter()
-{% endhighlight %}
-  
-  
-Returns the quarter of a year from a SQL date. E.g. 
"1994-09-27".toDate.quarter() leads to 3.
-  
-
-
-
-  
-{% highlight scala %}
 temporalOverlaps(TIMEPOINT, TEMPORAL, TIMEPOINT, TEMPORAL)
 {% endhighlight %}
   
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index 8aa5f8a..91d72ce 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -577,7 +577,11 @@ trait ImplicitExpressionOperations {
 * Returns the quarter of a year from a SQL date.
 *
 * e.g. "1994-09-27".toDate.quarter() leads to 3
+*
+* @deprecated This method will be used for describing an interval of 
months in future versions.
+* Use `extract(TimeIntervalUnit.QUARTER)` instead.
 */
+  @deprecated
   def quarter() = Quarter(expr)
 
   /**
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
index 4faf8d3..ec127e2 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/symbols.scala
@@ -85,6 +85,7 @@ object TimeIntervalUnit extends TableSymbols {
   val YEAR = Value(TimeUnitRange.YEAR)
   val YEAR_TO_MONTH = Value(TimeUnitRange.YEAR_TO_MONTH)
   val MONTH = Value(TimeUnitRange.MONTH)
+  val QUARTER = Value(TimeUnitRange.QUARTER)
   val DAY = Value(TimeUnitRange.DAY)
   val DAY_TO_HOUR = Value(TimeUnitRange.DAY_TO_HOUR)
   val DAY_TO_MINUTE = Value(TimeUnitRange.DAY_TO_MINUTE)
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
index f231343..5dff774 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressions/time.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/expressi

[flink] branch master updated: [FLINK-9947] [docs] Document unified table sources/sinks/formats

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new dacc16b  [FLINK-9947] [docs] Document unified table 
sources/sinks/formats
dacc16b is described below

commit dacc16b4fa6db6abfdbf73b99f26a5fd36b12acd
Author: Timo Walther 
AuthorDate: Thu Jul 26 15:42:53 2018 +0200

[FLINK-9947] [docs] Document unified table sources/sinks/formats

Adds documentation for unified table sources, sinks, and formats both
for Table API & SQL and SQL Client.

- New connect page
- Adapted SQL Client page
- Adapted Sources & Sinks page

This closes #6456.
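
As a quick reference for the programmatic path described in the new connect page, a minimal Scala
sketch of declaring a Kafka/JSON source with the unified descriptors. The topic, broker address,
and schema are made-up placeholders; the builder methods follow the documentation added below:

{% highlight scala %}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.descriptors.{Json, Kafka, Schema}

object ConnectDescriptorExample {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)

    // Declare connector, format, and schema separately and register the result.
    tableEnv
      .connect(
        new Kafka()
          .version("0.11")
          .topic("input-topic")                              // placeholder topic
          .property("bootstrap.servers", "localhost:9092"))  // placeholder broker
      .withFormat(new Json().deriveSchema())
      .withSchema(new Schema()
        .field("user", "VARCHAR")       // placeholder fields
        .field("amount", "DOUBLE"))
      .inAppendMode()
      .registerTableSource("MySource")

    // "MySource" can now be referenced from Table API & SQL statements.
  }
}
{% endhighlight %}
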
---
 docs/dev/table/connect.md  | 1049 
 docs/dev/table/sourceSinks.md  | 1026 ++-
 docs/dev/table/sqlClient.md|  412 +---
 .../org/apache/flink/table/descriptors/Json.java   |4 +-
 4 files changed, 1359 insertions(+), 1132 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
new file mode 100644
index 000..1bfff42
--- /dev/null
+++ b/docs/dev/table/connect.md
@@ -0,0 +1,1049 @@
+---
+title: "Connect to External Systems"
+nav-parent_id: tableapi
+nav-pos: 19
+---
+
+
+Flink's Table API & SQL programs can be connected to other external systems 
for reading and writing both batch and streaming tables. A table source 
provides access to data which is stored in external systems (such as a 
database, key-value store, message queue, or file system). A table sink emits a 
table to an external storage system. Depending on the type of source and sink, 
they support different formats such as CSV, Parquet, or ORC.
+
+This page describes how to declare built-in table sources and/or table sinks 
and register them in Flink. After a source or sink has been registered, it can 
be accessed by Table API & SQL statements.
+
+Attention If you want to implement 
your own *custom* table source or sink, have a look at the [user-defined 
sources & sinks page](sourceSinks.html).
+
+* This will be replaced by the TOC
+{:toc}
+
+Dependencies
+
+
+The following tables list all available connectors and formats. Their mutual 
compatibility is tagged in the corresponding sections for [table 
connectors](connect.html#table-connectors) and [table 
formats](connect.html#table-formats). The following table provides dependency 
information for both projects using a build automation tool (such as Maven or 
SBT) and SQL Client with SQL JAR bundles.
+
+{% if site.is_stable %}
+
+### Connectors
+
+| Name  | Version   | Maven dependency | SQL 
Client JAR |
+| : | : | :--- | 
:--|
+| Filesystem|   | Built-in | Built-in  
 |
+| Apache Kafka  | 0.8   | `flink-connector-kafka-0.8`  | Not 
available  |
+| Apache Kafka  | 0.9   | `flink-connector-kafka-0.9`  | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.9{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.9{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+| Apache Kafka  | 0.10  | `flink-connector-kafka-0.10` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.10{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.10{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+| Apache Kafka  | 0.11  | `flink-connector-kafka-0.11` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.11{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.11{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+
+### Formats
+
+| Name  | Maven dependency | SQL Client JAR |
+| : | :--- | :- |
+| CSV   | Built-in | Built-in   |
+| JSON  | `flink-json` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-json/{{site.version}}/flink-json-{{site.version}}-sql-jar.jar)
 |
+| Apache Avro   | `flink-avro` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-avro/{{site.version}}/flink-avro-{{site.version}}-sql-jar.jar)
 |
+
+{% else %}
+
+This table is only available for stable releases.
+
+{% endif %}
+
+{% top %}
+
+Overview
+
+
+Beginning from Flink 1.6, the declaration of a connection to an external 
system is separated from the actual implementation.
+
+Connections can be specified either
+
+- **programmatically** using a `Descriptor` under 
`org.apache

[flink] branch release-1.6 updated: [FLINK-9947] [docs] Document unified table sources/sinks/formats

2018-08-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new c858d31  [FLINK-9947] [docs] Document unified table 
sources/sinks/formats
c858d31 is described below

commit c858d31e7e6b404f810741ac076e66e14cb06868
Author: Timo Walther 
AuthorDate: Thu Jul 26 15:42:53 2018 +0200

[FLINK-9947] [docs] Document unified table sources/sinks/formats

Adds documentation for unified table sources, sinks, and formats both
for Table API & SQL and SQL Client.

- New connect page
- Adapted SQL Client page
- Adapted Sources & Sinks page

This closes #6456.
---
 docs/dev/table/connect.md  | 1049 
 docs/dev/table/sourceSinks.md  | 1026 ++-
 docs/dev/table/sqlClient.md|  412 +---
 .../org/apache/flink/table/descriptors/Json.java   |4 +-
 4 files changed, 1359 insertions(+), 1132 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
new file mode 100644
index 000..1bfff42
--- /dev/null
+++ b/docs/dev/table/connect.md
@@ -0,0 +1,1049 @@
+---
+title: "Connect to External Systems"
+nav-parent_id: tableapi
+nav-pos: 19
+---
+
+
+Flink's Table API & SQL programs can be connected to other external systems 
for reading and writing both batch and streaming tables. A table source 
provides access to data which is stored in external systems (such as a 
database, key-value store, message queue, or file system). A table sink emits a 
table to an external storage system. Depending on the type of source and sink, 
they support different formats such as CSV, Parquet, or ORC.
+
+This page describes how to declare built-in table sources and/or table sinks 
and register them in Flink. After a source or sink has been registered, it can 
be accessed by Table API & SQL statements.
+
+Attention If you want to implement 
your own *custom* table source or sink, have a look at the [user-defined 
sources & sinks page](sourceSinks.html).
+
+* This will be replaced by the TOC
+{:toc}
+
+Dependencies
+
+
+The following tables list all available connectors and formats. Their mutual 
compatibility is tagged in the corresponding sections for [table 
connectors](connect.html#table-connectors) and [table 
formats](connect.html#table-formats). The following table provides dependency 
information for both projects using a build automation tool (such as Maven or 
SBT) and SQL Client with SQL JAR bundles.
+
+{% if site.is_stable %}
+
+### Connectors
+
+| Name  | Version   | Maven dependency | SQL 
Client JAR |
+| : | : | :--- | 
:--|
+| Filesystem|   | Built-in | Built-in  
 |
+| Apache Kafka  | 0.8   | `flink-connector-kafka-0.8`  | Not 
available  |
+| Apache Kafka  | 0.9   | `flink-connector-kafka-0.9`  | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.9{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.9{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+| Apache Kafka  | 0.10  | `flink-connector-kafka-0.10` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.10{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.10{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+| Apache Kafka  | 0.11  | `flink-connector-kafka-0.11` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.11{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.11{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
+
+### Formats
+
+| Name  | Maven dependency | SQL Client JAR |
+| : | :--- | :- |
+| CSV   | Built-in | Built-in   |
+| JSON  | `flink-json` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-json/{{site.version}}/flink-json-{{site.version}}-sql-jar.jar)
 |
+| Apache Avro   | `flink-avro` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-avro/{{site.version}}/flink-avro-{{site.version}}-sql-jar.jar)
 |
+
+{% else %}
+
+This table is only available for stable releases.
+
+{% endif %}
+
+{% top %}
+
+Overview
+
+
+Beginning from Flink 1.6, the declaration of a connection to an external 
system is separated from the actual implementation.
+
+Connections can be specified either
+
+- **programmatically** using a `Descriptor` under 
`org.

[flink] branch master updated: [FLINK-6846] [table] Add timestamp addition in Table API

2018-08-03 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 4a369a0  [FLINK-6846] [table] Add timestamp addition in Table API
4a369a0 is described below

commit 4a369a0362a7e626e7bf746ed048153e010d92c7
Author: xueyu <278006...@qq.com>
AuthorDate: Wed Jun 20 21:30:02 2018 +0800

[FLINK-6846] [table] Add timestamp addition in Table API

It adds all temporal intervals known to SQL to the Table API
for timestamp/interval arithmetic.

Replaces the deprecated "quarter()" function.

This closes #6188.
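
A minimal sketch of the new interval expressions in timestamp arithmetic; the input value and field
name are illustrative assumptions, not part of the commit:

{% highlight scala %}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._

object IntervalArithmeticExample {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)

    // Illustrative timestamp rendered as a string.
    val t = env.fromElements("1994-09-27 13:14:15").toTable(tableEnv, 'ts)

    // Quarters are represented as an interval of months, weeks as an interval
    // of milliseconds, so both can be added to a timestamp directly.
    val shifted = t.select(
      'ts.toTimestamp + 2.quarters,
      'ts.toTimestamp + 1.week + 3.days)

    shifted.toAppendStream[(java.sql.Timestamp, java.sql.Timestamp)].print()
    env.execute()
  }
}
{% endhighlight %}
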
---
 docs/dev/table/tableApi.md |   4 +-
 .../flink/table/api/scala/expressionDsl.scala  |  69 ++
 .../flink/table/expressions/ExpressionParser.scala |  12 +-
 .../apache/flink/table/expressions/symbols.scala   |   3 +-
 .../org/apache/flink/table/expressions/time.scala  |   3 +-
 .../table/expressions/ScalarFunctionsTest.scala| 142 +
 6 files changed, 148 insertions(+), 85 deletions(-)

diff --git a/docs/dev/table/tableApi.md b/docs/dev/table/tableApi.md
index bd9d286..9317122 100644
--- a/docs/dev/table/tableApi.md
+++ b/docs/dev/table/tableApi.md
@@ -1613,7 +1613,7 @@ suffixed = interval | cast | as | if | functionCall ;
 
 interval = timeInterval | rowInterval ;
 
-timeInterval = composite , "." , ("year" | "years" | "month" | "months" | 
"day" | "days" | "hour" | "hours" | "minute" | "minutes" | "second" | "seconds" 
| "milli" | "millis") ;
+timeInterval = composite , "." , ("year" | "years" | "quarter" | "quarters" | 
"month" | "months" | "week" | "weeks" | "day" | "days" | "hour" | "hours" | 
"minute" | "minutes" | "second" | "seconds" | "milli" | "millis") ;
 
 rowInterval = composite , "." , "rows" ;
 
@@ -1633,7 +1633,7 @@ fieldReference = "*" | identifier ;
 
 nullLiteral = "Null(" , dataType , ")" ;
 
-timeIntervalUnit = "YEAR" | "YEAR_TO_MONTH" | "MONTH" | "DAY" | "DAY_TO_HOUR" 
| "DAY_TO_MINUTE" | "DAY_TO_SECOND" | "HOUR" | "HOUR_TO_MINUTE" | 
"HOUR_TO_SECOND" | "MINUTE" | "MINUTE_TO_SECOND" | "SECOND" ;
+timeIntervalUnit = "YEAR" | "YEAR_TO_MONTH" | "MONTH" | "QUARTER" | "WEEK" | 
"DAY" | "DAY_TO_HOUR" | "DAY_TO_MINUTE" | "DAY_TO_SECOND" | "HOUR" | 
"HOUR_TO_MINUTE" | "HOUR_TO_SECOND" | "MINUTE" | "MINUTE_TO_SECOND" | "SECOND" ;
 
 timePointUnit = "YEAR" | "MONTH" | "DAY" | "HOUR" | "MINUTE" | "SECOND" | 
"QUARTER" | "WEEK" | "MILLISECOND" | "MICROSECOND" ;
 
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index 91d72ce..0989fcd 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -574,17 +574,6 @@ trait ImplicitExpressionOperations {
   def extract(timeIntervalUnit: TimeIntervalUnit) = Extract(timeIntervalUnit, 
expr)
 
   /**
-* Returns the quarter of a year from a SQL date.
-*
-* e.g. "1994-09-27".toDate.quarter() leads to 3
-*
-* @deprecated This method will be used for describing an interval of 
months in future versions.
-* Use `extract(TimeIntervalUnit.QUARTER)` instead.
-*/
-  @deprecated
-  def quarter() = Quarter(expr)
-
-  /**
 * Rounds down a time point to the given unit.
 *
 * e.g. "12:44:31".toDate.floor(MINUTE) leads to 12:44:00
@@ -605,98 +594,126 @@ trait ImplicitExpressionOperations {
 *
 * @return interval of months
 */
-  def year = toMonthInterval(expr, 12)
+  def year: Expression = toMonthInterval(expr, 12)
 
   /**
 * Creates an interval of the given number of years.
 *
 * @return interval of months
 */
-  def years = year
+  def years: Expression = year
+
+  /**
+   * Creates an interval of the given number of quarters.
+   *
+   * @return interval of months
+   */
+  def quarter: Expression = toMo

[flink] branch master updated: [FLINK-10064] [table] Fix a typo in ExternalCatalogTable

2018-08-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 4b3dfb5  [FLINK-10064] [table] Fix a typo in ExternalCatalogTable
4b3dfb5 is described below

commit 4b3dfb571e3b64b8fe340b29aa0d9edf1ce3fef5
Author: jerryjzhang 
AuthorDate: Mon Aug 6 01:01:12 2018 +0800

[FLINK-10064] [table] Fix a typo in ExternalCatalogTable

This closes #6497.
---
 .../scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
index 79da852..9576f34 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
@@ -88,7 +88,7 @@ class ExternalCatalogTable(
 * Returns whether this external table is declared as table sink.
 */
   def isTableSink: Boolean = {
-isSource
+isSink
   }
 
   /**



[flink] branch release-1.6 updated: [FLINK-10064] [table] Fix a typo in ExternalCatalogTable

2018-08-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 8c9b59e  [FLINK-10064] [table] Fix a typo in ExternalCatalogTable
8c9b59e is described below

commit 8c9b59e79d35842a771214302dad75c1e99da682
Author: jerryjzhang 
AuthorDate: Mon Aug 6 01:01:12 2018 +0800

[FLINK-10064] [table] Fix a typo in ExternalCatalogTable

This closes #6497.
---
 .../scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
index 79da852..9576f34 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/ExternalCatalogTable.scala
@@ -88,7 +88,7 @@ class ExternalCatalogTable(
 * Returns whether this external table is declared as table sink.
 */
   def isTableSink: Boolean = {
-isSource
+isSink
   }
 
   /**



[flink] branch master updated: [FLINK-10071] [docs] Document usage of INSERT INTO in SQL Client

2018-08-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 5b7a64f  [FLINK-10071] [docs] Document usage of INSERT INTO in SQL 
Client
5b7a64f is described below

commit 5b7a64fcef3ad09df1ccc0f91386766ee553bf45
Author: Timo Walther 
AuthorDate: Mon Aug 6 15:26:51 2018 +0200

[FLINK-10071] [docs] Document usage of INSERT INTO in SQL Client

This closes #6505.
---
 docs/dev/table/sqlClient.md | 68 +
 1 file changed, 63 insertions(+), 5 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index b735705..d35aa59 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -106,7 +106,9 @@ Alice, 1
 Greg, 1
 {% endhighlight %}
 
-The [configuration section](sqlClient.html#configuration) explains how to read 
from table sources and configure other table program properties.
+Both result modes can be useful during the prototyping of SQL queries.
+
+After a query is defined, it can be submitted to the cluster as a 
long-running, detached Flink job. For this, a target system that stores the 
results needs to be specified using the [INSERT INTO 
statement](sqlClient.html#detached-sql-queries). The [configuration 
section](sqlClient.html#configuration) explains how to declare table sources 
for reading data, how to declare table sinks for writing data, and how to 
configure other table program properties.
 
 {% top %}
 
@@ -161,7 +163,7 @@ Every environment file is a regular [YAML 
file](http://yaml.org/). An example of
 # Define table sources here.
 
 tables:
-  - name: MyTableName
+  - name: MyTableSource
 type: source
 connector:
   type: filesystem
@@ -286,8 +288,8 @@ Both `connector` and `format` allow to define a property 
version (which is curre
 
 {% top %}
 
-User-defined Functions
-
+### User-defined Functions
+
 The SQL Client allows users to create custom, user-defined functions to be 
used in SQL queries. Currently, these functions are restricted to be defined 
programmatically in Java/Scala classes.
 
 In order to provide a user-defined function, you need to first implement and 
compile a function class that extends `ScalarFunction`, `AggregateFunction` or 
`TableFunction` (see [User-defined Functions]({{ site.baseurl 
}}/dev/table/udfs.html)). One or more functions can then be packaged into a 
dependency JAR for the SQL Client.
@@ -313,7 +315,7 @@ functions:
 
 Make sure that the order and types of the specified parameters strictly match 
one of the constructors of your function class.
 
-### Constructor Parameters
+ Constructor Parameters
 
 Depending on the user-defined function, it might be necessary to parameterize 
the implementation before using it in SQL statements.
 
@@ -369,6 +371,62 @@ This process can be recursively performed until all the 
constructor parameters a
 
 {% top %}
 
+Detached SQL Queries
+
+
+In order to define end-to-end SQL pipelines, SQL's `INSERT INTO` statement can 
be used for submitting long-running, detached queries to a Flink cluster. These 
queries produce their results into an external system instead of the SQL 
Client. This allows for dealing with higher parallelism and larger amounts of 
data. The CLI itself does not have any control over a detached query after 
submission.
+
+{% highlight sql %}
+INSERT INTO MyTableSink SELECT * FROM MyTableSource
+{% endhighlight %}
+
+The table sink `MyTableSink` has to be declared in the environment file. See 
the [connection page](connect.html) for more information about supported 
external systems and their configuration. An example for an Apache Kafka table 
sink is shown below.
+
+{% highlight yaml %}
+tables:
+  - name: MyTableSink
+type: sink
+update-mode: append
+connector:
+  property-version: 1
+  type: kafka
+  version: 0.11
+  topic: OutputTopic
+  properties:
+- key: zookeeper.connect
+  value: localhost:2181
+- key: bootstrap.servers
+  value: localhost:9092
+- key: group.id
+  value: testGroup
+format:
+  property-version: 1
+  type: json
+  derive-schema: true
+schema:
+  - name: rideId
+type: LONG
+  - name: lon
+type: FLOAT
+  - name: lat
+type: FLOAT
+  - name: rideTime
+type: TIMESTAMP
+{% endhighlight %}
+
+The SQL Client makes sure that a statement is successfully submitted to the 
cluster. Once the query is submitted, the CLI will show information about the 
Flink job.
+
+{% highlight text %}
+[INFO] Table update statement has been successfully submitted to the cluster:
+Cluster ID: StandaloneClusterId
+Job ID: 6f922fe5cba87406ff23ae4a7bb79044
+Web interface: http://localhost:8081
+{% endhigh

[flink] branch release-1.6 updated: [FLINK-10071] [docs] Document usage of INSERT INTO in SQL Client

2018-08-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 2915811  [FLINK-10071] [docs] Document usage of INSERT INTO in SQL 
Client
2915811 is described below

commit 29158113432251f9bc6b004dcafd2338d8cf7ee6
Author: Timo Walther 
AuthorDate: Mon Aug 6 15:26:51 2018 +0200

[FLINK-10071] [docs] Document usage of INSERT INTO in SQL Client

This closes #6505.
---
 docs/dev/table/sqlClient.md | 68 +
 1 file changed, 63 insertions(+), 5 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index b735705..d35aa59 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -106,7 +106,9 @@ Alice, 1
 Greg, 1
 {% endhighlight %}
 
-The [configuration section](sqlClient.html#configuration) explains how to read 
from table sources and configure other table program properties.
+Both result modes can be useful during the prototyping of SQL queries.
+
+After a query is defined, it can be submitted to the cluster as a 
long-running, detached Flink job. For this, a target system that stores the 
results needs to be specified using the [INSERT INTO 
statement](sqlClient.html#detached-sql-queries). The [configuration 
section](sqlClient.html#configuration) explains how to declare table sources 
for reading data, how to declare table sinks for writing data, and how to 
configure other table program properties.
 
 {% top %}
 
@@ -161,7 +163,7 @@ Every environment file is a regular [YAML 
file](http://yaml.org/). An example of
 # Define table sources here.
 
 tables:
-  - name: MyTableName
+  - name: MyTableSource
 type: source
 connector:
   type: filesystem
@@ -286,8 +288,8 @@ Both `connector` and `format` allow to define a property 
version (which is curre
 
 {% top %}
 
-User-defined Functions
-
+### User-defined Functions
+
 The SQL Client allows users to create custom, user-defined functions to be 
used in SQL queries. Currently, these functions are restricted to be defined 
programmatically in Java/Scala classes.
 
 In order to provide a user-defined function, you need to first implement and 
compile a function class that extends `ScalarFunction`, `AggregateFunction` or 
`TableFunction` (see [User-defined Functions]({{ site.baseurl 
}}/dev/table/udfs.html)). One or more functions can then be packaged into a 
dependency JAR for the SQL Client.
@@ -313,7 +315,7 @@ functions:
 
 Make sure that the order and types of the specified parameters strictly match 
one of the constructors of your function class.
 
-### Constructor Parameters
+ Constructor Parameters
 
 Depending on the user-defined function, it might be necessary to parameterize 
the implementation before using it in SQL statements.
 
@@ -369,6 +371,62 @@ This process can be recursively performed until all the 
constructor parameters a
 
 {% top %}
 
+Detached SQL Queries
+
+
+In order to define end-to-end SQL pipelines, SQL's `INSERT INTO` statement can 
be used for submitting long-running, detached queries to a Flink cluster. These 
queries produce their results into an external system instead of the SQL 
Client. This allows for dealing with higher parallelism and larger amounts of 
data. The CLI itself does not have any control over a detached query after 
submission.
+
+{% highlight sql %}
+INSERT INTO MyTableSink SELECT * FROM MyTableSource
+{% endhighlight %}
+
+The table sink `MyTableSink` has to be declared in the environment file. See 
the [connection page](connect.html) for more information about supported 
external systems and their configuration. An example for an Apache Kafka table 
sink is shown below.
+
+{% highlight yaml %}
+tables:
+  - name: MyTableSink
+type: sink
+update-mode: append
+connector:
+  property-version: 1
+  type: kafka
+  version: 0.11
+  topic: OutputTopic
+  properties:
+- key: zookeeper.connect
+  value: localhost:2181
+- key: bootstrap.servers
+  value: localhost:9092
+- key: group.id
+  value: testGroup
+format:
+  property-version: 1
+  type: json
+  derive-schema: true
+schema:
+  - name: rideId
+type: LONG
+  - name: lon
+type: FLOAT
+  - name: lat
+type: FLOAT
+  - name: rideTime
+type: TIMESTAMP
+{% endhighlight %}
+
+The SQL Client makes sure that a statement is successfully submitted to the 
cluster. Once the query is submitted, the CLI will show information about the 
Flink job.
+
+{% highlight text %}
+[INFO] Table update statement has been successfully submitted to the cluster:
+Cluster ID: StandaloneClusterId
+Job ID: 6f922fe5cba87406ff23ae4a7bb79044
+Web interface: http://localhost:8081
+{% endhigh

[flink] branch master updated: [FLINK-10073] [sql-client] Allow setting a restart strategy in SQL Client

2018-08-07 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 73bf45a  [FLINK-10073] [sql-client] Allow setting a restart strategy 
in SQL Client
73bf45a is described below

commit 73bf45ab4802ca396ce7e3a0393bb999642a3d4a
Author: Timo Walther 
AuthorDate: Mon Aug 6 18:27:57 2018 +0200

[FLINK-10073] [sql-client] Allow setting a restart strategy in SQL Client

Adds support for fine-grained restart strategies per job/query.

This closes #6506.
---
 docs/dev/table/sqlClient.md| 34 +++-
 .../flink-sql-client/conf/sql-client-defaults.yaml |  6 +++
 .../flink/table/client/config/Execution.java   | 45 +-
 .../flink/table/client/config/PropertyStrings.java | 18 +
 .../client/gateway/local/ExecutionContext.java |  2 +
 .../client/gateway/local/ExecutionContextTest.java | 11 ++
 .../client/gateway/local/LocalExecutorITCase.java  |  4 ++
 .../test/resources/test-sql-client-defaults.yaml   |  5 +++
 8 files changed, 122 insertions(+), 3 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index d35aa59..0e8d2d6 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -108,6 +108,8 @@ Greg, 1
 
 Both result modes can be useful during the prototyping of SQL queries.
 
+Attention Queries that are executed in 
a batch environment can only be retrieved using the `table` result mode.
+
 After a query is defined, it can be submitted to the cluster as a 
long-running, detached Flink job. For this, a target system that stores the 
results needs to be specified using the [INSERT INTO 
statement](sqlClient.html#detached-sql-queries). The [configuration 
section](sqlClient.html#configuration) explains how to declare table sources 
for reading data, how to declare table sinks for writing data, and how to 
configure other table program properties.
 
 {% top %}
@@ -204,6 +206,8 @@ execution:
   max-parallelism: 16   # optional: Flink's maximum parallelism 
(128 by default)
   min-idle-state-retention: 0   # optional: table program's minimum idle 
state time
   max-idle-state-retention: 0   # optional: table program's maximum idle 
state time
+  restart-strategy: # optional: restart strategy
+type: fallback  #   "fallback" to global restart 
strategy by default
 
 # Deployment properties allow for describing the cluster to which table 
programs are submitted to.
 
@@ -227,7 +231,35 @@ Depending on the use case, a configuration can be split 
into multiple files. The
 CLI commands > session environment file > defaults environment file
 {% endhighlight %}
 
-Queries that are executed in a batch environment, can only be retrieved using 
the `table` result mode. 
+ Restart Strategies
+
+Restart strategies control how Flink jobs are restarted in case of a failure. 
Similar to [global restart strategies]({{ site.baseurl 
}}/dev/restart_strategies.html) for a Flink cluster, a more fine-grained 
restart configuration can be declared in an environment file.
+
+The following strategies are supported:
+
+{% highlight yaml %}
+execution:
+  # falls back to the global strategy defined in flink-conf.yaml
+  restart-strategy:
+type: fallback
+
+  # job fails directly and no restart is attempted
+  restart-strategy:
+type: none
+
+  # attempts a given number of times to restart the job
+  restart-strategy:
+type: fixed-delay
+attempts: 3  # retries before job is declared as failed (default: 
Integer.MAX_VALUE)
+delay: 1 # delay in ms between retries (default: 10 s)
+
+  # attempts as long as the maximum number of failures per time interval is 
not exceeded
+  restart-strategy:
+type: failure-rate
+max-failures-per-interval: 1   # retries in interval until failing 
(default: 1)
+failure-rate-interval: 6   # measuring interval in ms for failure rate
+delay: 1   # delay in ms between retries (default: 10 
s)
+{% endhighlight %}
 
 {% top %}
 
diff --git a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml 
b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
index 51e6e95..8be4ce6 100644
--- a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
+++ b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
@@ -74,6 +74,12 @@ execution:
   min-idle-state-retention: 0
   # maximum idle state retention in ms
   max-idle-state-retention: 0
+  # controls how table programs are restarted in case of a failure
+  restart-strategy:
+# strategy type
+# possible values are "fixed-delay", "failure-rate", &

[flink] branch master updated: [FLINK-10107] [e2e] Exclude conflicting SQL JARs from test

2018-08-09 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 764da81  [FLINK-10107] [e2e] Exclude conflicting SQL JARs from test
764da81 is described below

commit 764da8183b6dfd0fe00eebe96fb619ca8d096047
Author: Timo Walther 
AuthorDate: Thu Aug 9 10:58:36 2018 +0200

[FLINK-10107] [e2e] Exclude conflicting SQL JARs from test

This is a temporary solution for fixing the SQL Client end-to-end
test for releases. For now, we do not include conflicting
SQL JARs in the library folder of the test.

This closes #6528.
---
 flink-end-to-end-tests/flink-sql-client-test/pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
index ec5a0e1..6e69568 100644
--- a/flink-end-to-end-tests/flink-sql-client-test/pom.xml
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -153,13 +153,14 @@ under the License.

sql-jar

jar

+   


org.apache.flink

flink-connector-kafka-0.10_${scala.binary.version}
@@ -167,13 +168,14 @@ under the License.

sql-jar

jar

+   






[flink] branch release-1.6 updated: [FLINK-10107] [e2e] Exclude conflicting SQL JARs from test

2018-08-09 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new da23c5d  [FLINK-10107] [e2e] Exclude conflicting SQL JARs from test
da23c5d is described below

commit da23c5d3c1e921cd7d2a88b7c0892f17e5d7276f
Author: Timo Walther 
AuthorDate: Thu Aug 9 10:58:36 2018 +0200

[FLINK-10107] [e2e] Exclude conflicting SQL JARs from test

This is a temporary solution for fixing the SQL Client end-to-end
test for releases. For now, we do not include conflicting
SQL JARs in the library folder of the test.

This closes #6528.
---
 flink-end-to-end-tests/flink-sql-client-test/pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/flink-end-to-end-tests/flink-sql-client-test/pom.xml 
b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
index c64fe28..0baa514 100644
--- a/flink-end-to-end-tests/flink-sql-client-test/pom.xml
+++ b/flink-end-to-end-tests/flink-sql-client-test/pom.xml
@@ -153,13 +153,14 @@ under the License.

sql-jar

jar

+   


org.apache.flink

flink-connector-kafka-0.10_${scala.binary.version}
@@ -167,13 +168,14 @@ under the License.

sql-jar

jar

+   






[flink] branch master updated: [FLINK-9637] [docs] Add public user documentation for state TTL feature

2018-08-09 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 44eef6f  [FLINK-9637] [docs] Add public user documentation for state 
TTL feature
44eef6f is described below

commit 44eef6fdff73f5aa59573519ad83005a42153e6e
Author: Andrey Zagrebin 
AuthorDate: Fri Jul 20 14:57:08 2018 +0200

[FLINK-9637] [docs] Add public user documentation for state TTL feature

This closes #6379.
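
To complement the descriptor-level snippets in the new section, a minimal sketch of how a
TTL-enabled descriptor is typically wired into a keyed function. Only the StateTtlConfig part is
taken from the documentation below; the function, its input type, and field names are illustrative
assumptions:

{% highlight scala %}
import org.apache.flink.api.common.functions.RichFlatMapFunction
import org.apache.flink.api.common.state.{StateTtlConfig, ValueState, ValueStateDescriptor}
import org.apache.flink.api.common.time.Time
import org.apache.flink.configuration.Configuration
import org.apache.flink.util.Collector

// Illustrative keyed function; the surrounding job wiring (keyBy etc.) is not shown.
class LastSeenText extends RichFlatMapFunction[String, String] {

  @transient private var lastText: ValueState[String] = _

  override def open(parameters: Configuration): Unit = {
    val ttlConfig = StateTtlConfig
      .newBuilder(Time.seconds(1))
      .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
      .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
      .build()

    val descriptor = new ValueStateDescriptor[String]("text state", classOf[String])
    descriptor.enableTimeToLive(ttlConfig)

    lastText = getRuntimeContext.getState(descriptor)
  }

  override def flatMap(value: String, out: Collector[String]): Unit = {
    // Expired entries behave as if absent because of NeverReturnExpired.
    Option(lastText.value()).foreach(v => out.collect(v))
    lastText.update(value)
  }
}
{% endhighlight %}
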
---
 docs/dev/stream/state/state.md | 127 +
 1 file changed, 127 insertions(+)

diff --git a/docs/dev/stream/state/state.md b/docs/dev/stream/state/state.md
index 44a3653..fb78776 100644
--- a/docs/dev/stream/state/state.md
+++ b/docs/dev/stream/state/state.md
@@ -266,6 +266,133 @@ a `ValueState`. Once the count reaches 2 it will emit the 
average and clear the
 we start over from `0`. Note that this would keep a different state value for 
each different input
 key if we had tuples with different values in the first field.
 
+### State Time-To-Live (TTL)
+
+A *time-to-live* (TTL) can be assigned to the keyed state of any type. If a 
TTL is configured and a
+state value has expired, the stored value will be cleaned up on a best effort 
basis which is
+discussed in more detail below.
+
+All state collection types support per-entry TTLs. This means that list 
elements and map entries
+expire independently.
+
+In order to use state TTL one must first build a `StateTtlConfig` 
configuration object. The TTL 
+functionality can then be enabled in any state descriptor by passing the 
configuration:
+
+
+
+{% highlight java %}
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.time.Time;
+
+StateTtlConfig ttlConfig = StateTtlConfig
+.newBuilder(Time.seconds(1))
+.setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+.setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+.build();
+
+ValueStateDescriptor stateDescriptor = new 
ValueStateDescriptor<>("text state", String.class);
+stateDescriptor.enableTimeToLive(ttlConfig);
+{% endhighlight %}
+
+
+
+{% highlight scala %}
+import org.apache.flink.api.common.state.StateTtlConfig
+import org.apache.flink.api.common.state.ValueStateDescriptor
+import org.apache.flink.api.common.time.Time
+
+val ttlConfig = StateTtlConfig
+.newBuilder(Time.seconds(1))
+.setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+.setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+.build
+
+val stateDescriptor = new ValueStateDescriptor[String]("text state", 
classOf[String])
+stateDescriptor.enableTimeToLive(ttlConfig)
+{% endhighlight %}
+
+
+
+The configuration has several options to consider:
+
+The first parameter of the `newBuilder` method is mandatory, it is the 
time-to-live value.
+
+The update type configures when the state TTL is refreshed (by default 
`OnCreateAndWrite`):
+
+ - `StateTtlConfig.UpdateType.OnCreateAndWrite` - only on creation and write 
access
+ - `StateTtlConfig.UpdateType.OnReadAndWrite` - also on read access
+ 
+The state visibility configures whether the expired value is returned on read 
access 
+if it is not cleaned up yet (by default `NeverReturnExpired`):
+
+ - `StateTtlConfig.StateVisibility.NeverReturnExpired` - expired value is 
never returned
+ - `StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp` - returned if 
still available
+ 
+In case of `NeverReturnExpired`, the expired state behaves as if it does not 
exist anymore, 
+even if it still has to be removed. The option can be useful for use cases 
+where data has to become unavailable for read access strictly after the TTL, 
+e.g. applications working with privacy-sensitive data.
+ 
+The other option, `ReturnExpiredIfNotCleanedUp`, allows returning the expired 
state before its cleanup.
+
+**Notes:** 
+
+- The state backends store the timestamp of the last modification along with 
the user value, 
+which means that enabling this feature increases the consumption of state storage. 
+The heap state backend stores an additional Java object with a reference to the 
user state object 
+and a primitive long value in memory. The RocksDB state backend adds 8 bytes 
per stored value, list entry, or map entry.
+
+- Only TTLs in reference to *processing time* are currently supported.
+
+- Trying to restore state that was previously configured without TTL using a 
TTL-enabled descriptor, or vice versa,
+will lead to a compatibility failure and a `StateMigrationException`.
+
+- The TTL configuration is not part of checkpoints or savepoints; it only 
determines how Flink treats the state in the currently running job.
+
+ Cleanup of Expired State
+
+Currently, expired values are only removed when they are read
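
As an editorial aside, here is a minimal sketch (not part of the commit above) that combines the two non-default options discussed in the new documentation, i.e. refreshing the TTL on read access and returning values that have expired but are not cleaned up yet. The descriptor name, state type, and TTL duration are arbitrary choices for illustration:

{% highlight java %}
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;

// Refresh the TTL on read access as well, and return values that have
// expired but were not cleaned up yet.
StateTtlConfig ttlConfig = StateTtlConfig
    .newBuilder(Time.minutes(10))
    .setUpdateType(StateTtlConfig.UpdateType.OnReadAndWrite)
    .setStateVisibility(StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp)
    .build();

// Enable TTL on an otherwise ordinary keyed-state descriptor.
ValueStateDescriptor<Long> lastLogin =
    new ValueStateDescriptor<>("last-login", Long.class);
lastLogin.enableTimeToLive(ttlConfig);
{% endhighlight %}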

[flink] branch release-1.6 updated: [FLINK-9637] [docs] Add public user documentation for state TTL feature

2018-08-09 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new ca65e94  [FLINK-9637] [docs] Add public user documentation for state 
TTL feature
ca65e94 is described below

commit ca65e9490eda09f28754da395be5948756fd2614
Author: Andrey Zagrebin 
AuthorDate: Fri Jul 20 14:57:08 2018 +0200

[FLINK-9637] [docs] Add public user documentation for state TTL feature

This closes #6379.
---
 docs/dev/stream/state/state.md | 127 +
 1 file changed, 127 insertions(+)

diff --git a/docs/dev/stream/state/state.md b/docs/dev/stream/state/state.md
index 44a3653..fb78776 100644
--- a/docs/dev/stream/state/state.md
+++ b/docs/dev/stream/state/state.md
@@ -266,6 +266,133 @@ a `ValueState`. Once the count reaches 2 it will emit the 
average and clear the
 we start over from `0`. Note that this would keep a different state value for 
each different input
 key if we had tuples with different values in the first field.
 
+### State Time-To-Live (TTL)
+
+A *time-to-live* (TTL) can be assigned to the keyed state of any type. If a 
TTL is configured and a
+state value has expired, the stored value will be cleaned up on a best effort 
basis which is
+discussed in more detail below.
+
+All state collection types support per-entry TTLs. This means that list 
elements and map entries
+expire independently.
+
+In order to use state TTL one must first build a `StateTtlConfig` 
configuration object. The TTL 
+functionality can then be enabled in any state descriptor by passing the 
configuration:
+
+
+
+{% highlight java %}
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.time.Time;
+
+StateTtlConfig ttlConfig = StateTtlConfig
+.newBuilder(Time.seconds(1))
+.setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+.setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+.build();
+
+ValueStateDescriptor stateDescriptor = new 
ValueStateDescriptor<>("text state", String.class);
+stateDescriptor.enableTimeToLive(ttlConfig);
+{% endhighlight %}
+
+
+
+{% highlight scala %}
+import org.apache.flink.api.common.state.StateTtlConfig
+import org.apache.flink.api.common.state.ValueStateDescriptor
+import org.apache.flink.api.common.time.Time
+
+val ttlConfig = StateTtlConfig
+.newBuilder(Time.seconds(1))
+.setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
+.setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
+.build
+
+val stateDescriptor = new ValueStateDescriptor[String]("text state", 
classOf[String])
+stateDescriptor.enableTimeToLive(ttlConfig)
+{% endhighlight %}
+
+
+
+The configuration has several options to consider:
+
+The first parameter of the `newBuilder` method is mandatory; it is the 
time-to-live value.
+
+The update type configures when the state TTL is refreshed (by default 
`OnCreateAndWrite`):
+
+ - `StateTtlConfig.UpdateType.OnCreateAndWrite` - only on creation and write 
access
+ - `StateTtlConfig.UpdateType.OnReadAndWrite` - also on read access
+ 
+The state visibility configures whether the expired value is returned on read 
access 
+if it is not cleaned up yet (by default `NeverReturnExpired`):
+
+ - `StateTtlConfig.StateVisibility.NeverReturnExpired` - expired value is 
never returned
+ - `StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp` - returned if 
still available
+ 
+In case of `NeverReturnExpired`, the expired state behaves as if it does not 
exist anymore, 
+even if it still has to be removed. The option can be useful for use cases 
+where data has to become unavailable for read access strictly after the TTL, 
+e.g. applications working with privacy-sensitive data.
+ 
+The other option, `ReturnExpiredIfNotCleanedUp`, allows returning the expired 
state before its cleanup.
+
+**Notes:** 
+
+- The state backends store the timestamp of the last modification along with 
the user value, 
+which means that enabling this feature increases the consumption of state storage. 
+The heap state backend stores an additional Java object with a reference to the 
user state object 
+and a primitive long value in memory. The RocksDB state backend adds 8 bytes 
per stored value, list entry, or map entry.
+
+- Only TTLs in reference to *processing time* are currently supported.
+
+- Trying to restore state that was previously configured without TTL using a 
TTL-enabled descriptor, or vice versa,
+will lead to a compatibility failure and a `StateMigrationException`.
+
+- The TTL configuration is not part of checkpoints or savepoints; it only 
determines how Flink treats the state in the currently running job.
+
+ Cleanup of Expired State
+
+Currently, expired values are only removed when th

[flink] 01/02: [FLINK-9853] [table] Add HEX support for Table API & SQL

2018-08-14 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit ab1d1dfb2ad872748833896d552e2d56a26a9a92
Author: xueyu <278006...@qq.com>
AuthorDate: Sun Jul 15 20:01:15 2018 +0800

[FLINK-9853] [table] Add HEX support for Table API & SQL

This closes #6337.
---
 docs/dev/table/functions.md| 39 +
 .../flink/table/api/scala/expressionDsl.scala  |  9 +++
 .../flink/table/codegen/calls/BuiltInMethods.scala |  4 +
 .../table/codegen/calls/FunctionGenerator.scala| 12 +++
 .../flink/table/expressions/mathExpressions.scala  | 18 +
 .../table/functions/sql/ScalarSqlFunctions.scala   |  9 +++
 .../table/runtime/functions/ScalarFunctions.scala  | 16 +++-
 .../flink/table/validate/FunctionCatalog.scala |  2 +
 .../table/expressions/ScalarFunctionsTest.scala| 93 ++
 9 files changed, 199 insertions(+), 3 deletions(-)

diff --git a/docs/dev/table/functions.md b/docs/dev/table/functions.md
index 00a9605..f637b4d 100644
--- a/docs/dev/table/functions.md
+++ b/docs/dev/table/functions.md
@@ -1393,6 +1393,19 @@ BIN(integer)
 E.g. BIN(4) returns '100' and BIN(12) 
returns '1100'.
   
 
+
+
+  
+{% highlight text %}
+HEX(numeric)
+HEX(string)
+  {% endhighlight %}
+  
+  
+Returns a string representation of an integer numeric value 
or a string in hex format. Returns NULL if the argument is NULL.
+E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", a 
string "hello,world" leads to "68656C6C6F2C776F726C64".
+  
+
   
 
 
@@ -1805,6 +1818,19 @@ INTEGER.bin()
   E.g., 4.bin() returns "100" and 12.bin() 
returns "1100".
 

+
+
+  
+   {% highlight java %}
+NUMERIC.hex()
+STRING.hex()
+{% endhighlight %}
+ 
+
+  Returns a string representation of an integer NUMERIC value or 
a STRING in hex format. Returns NULL if the argument is NULL.
+  E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", a 
string "hello,world" leads to "68656C6C6F2C776F726C64".
+
+   
   
 
 
@@ -2217,6 +2243,19 @@ INTEGER.bin()
   E.g., 4.bin() returns "100" and 12.bin() 
returns "1100".
 

+
+
+  
+   {% highlight scala %}
+NUMERIC.hex()
+STRING.hex()
+{% endhighlight %}
+ 
+
+  Returns a string representation of an integer NUMERIC value or 
a STRING in hex format. Returns NULL if the argument is NULL.
+  E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", a 
string "hello,world" leads to "68656C6C6F2C776F726C64".
+
+   
   
 
 
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index d1bb06c..66e7544 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -406,6 +406,15 @@ trait ImplicitExpressionOperations {
 */
   def bin() = Bin(expr)
 
+  /**
+* Returns a string representation of an integer numeric value or a string 
in hex format. Returns
+* null if numeric or string is null.
+*
+* E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", and a 
string "hello,world" leads
+* to "68656c6c6f2c776f726c64".
+*/
+  def hex() = Hex(expr)
+
   // String operations
 
   /**
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
index f5ed9b3..942666a 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
@@ -17,6 +17,7 @@
  */
 package org.apache.flink.table.codegen.calls
 
+import java.lang.reflect.Method
 import java.lang.{Long => JLong}
 import java.math.{BigDecimal => JBigDecimal}
 
@@ -135,4 +136,7 @@ object BuiltInMethods {
   val FROMBASE64 = Types.lookupMethod(classOf[ScalarFunctions], "fromBase64", 
classOf[String])
 
   val TOBASE64 = Types.lookupMethod(classOf[ScalarFunctions], "toBase64", 
classOf[String])
+
+  val HEX_LONG: Method = Types.lookupMethod(classOf[ScalarFunctions], "hex", 
classOf[Long])
+  val HEX_STRING: Method = Types.lookupMethod(classOf[ScalarFunctions], "hex", 
classOf[String])
 }
diff --git 
a/flink-libraries/fli
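
For illustration (not part of the patch), the new function can be exercised from a plain SQL query once this commit is in place. The table environment `tEnv` and a registered table `MyTable` with an integer column `amount` and a string column `word` are assumptions:

{% highlight java %}
import org.apache.flink.table.api.Table;

// HEX on a numeric and on a string column; per the documentation above,
// HEX(20) -> '14', HEX(100) -> '64', HEX('hello,world') -> '68656C6C6F2C776F726C64'.
Table hexed = tEnv.sqlQuery(
    "SELECT HEX(amount) AS amountHex, HEX(word) AS wordHex FROM MyTable");
{% endhighlight %}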

[flink] 02/02: [hotfix] [table] [docs] Improvements for the functions documentation

2018-08-14 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 2f1689477a8b5de898e834deb5865ff68b3032a3
Author: Timo Walther 
AuthorDate: Tue Aug 14 11:39:28 2018 +0200

[hotfix] [table] [docs] Improvements for the functions documentation
---
 docs/dev/table/functions.md | 135 +---
 docs/dev/table/sql.md   | 119 ++
 docs/dev/table/tableApi.md  |   7 ++-
 3 files changed, 139 insertions(+), 122 deletions(-)

diff --git a/docs/dev/table/functions.md b/docs/dev/table/functions.md
index f637b4d..cf42bfa 100644
--- a/docs/dev/table/functions.md
+++ b/docs/dev/table/functions.md
@@ -1,7 +1,7 @@
 ---
 title: "Built-In Functions"
 nav-parent_id: tableapi
-nav-pos: 45
+nav-pos: 31
 ---
 

[flink] branch master updated (047a85b -> 2f16894)

2018-08-14 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 047a85b  Fix Javadoc links in documentation
 new ab1d1df  [FLINK-9853] [table] Add HEX support for Table API & SQL
 new 2f16894  [hotfix] [table] [docs] Improvements for the functions 
documentation

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 docs/dev/table/functions.md| 174 -
 docs/dev/table/sql.md  | 119 +-
 docs/dev/table/tableApi.md |   7 +-
 .../flink/table/api/scala/expressionDsl.scala  |   9 ++
 .../flink/table/codegen/calls/BuiltInMethods.scala |   4 +
 .../table/codegen/calls/FunctionGenerator.scala|  12 ++
 .../flink/table/expressions/mathExpressions.scala  |  18 +++
 .../table/functions/sql/ScalarSqlFunctions.scala   |   9 ++
 .../table/runtime/functions/ScalarFunctions.scala  |  16 +-
 .../flink/table/validate/FunctionCatalog.scala |   2 +
 .../table/expressions/ScalarFunctionsTest.scala|  93 +++
 11 files changed, 338 insertions(+), 125 deletions(-)



[flink] branch master updated: [FLINK-7205] [table] Add UUID() for Table API & SQL

2018-08-14 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 913b041  [FLINK-7205] [table] Add UUID() for Table API & SQL
913b041 is described below

commit 913b0413882939c30da4ad4df0cabc84dfe69ea0
Author: wind 
AuthorDate: Wed Aug 1 12:52:34 2018 +0800

[FLINK-7205] [table] Add UUID() for Table API & SQL

This closes #6381.
---
 docs/dev/table/functions.md| 33 ++
 .../flink/table/api/scala/expressionDsl.scala  | 19 +
 .../flink/table/codegen/calls/BuiltInMethods.scala |  2 ++
 .../table/codegen/calls/FunctionGenerator.scala|  6 
 .../flink/table/expressions/mathExpressions.scala  | 11 
 .../table/functions/sql/ScalarSqlFunctions.scala   | 11 
 .../table/runtime/functions/ScalarFunctions.scala  |  5 
 .../flink/table/validate/FunctionCatalog.scala |  3 ++
 .../table/expressions/NonDeterministicTests.scala  |  9 ++
 .../table/expressions/ScalarFunctionsTest.scala| 33 ++
 10 files changed, 132 insertions(+)

diff --git a/docs/dev/table/functions.md b/docs/dev/table/functions.md
index cf42bfa..eac6d16 100644
--- a/docs/dev/table/functions.md
+++ b/docs/dev/table/functions.md
@@ -1382,6 +1382,17 @@ RAND_INTEGER(integer1, integer2)
   Returns a pseudorandom integer value between 0 (inclusive) and the 
specified value (exclusive) with an initial seed. Two RAND_INTEGER functions 
will return identical sequences of numbers if they have the same initial seed 
and bound.
 

+
+
+ 
+   {% highlight text %}
+UUID()
+{% endhighlight %}
+ 
+
+  Returns a UUID (Universally Unique Identifier) string (e.g., 
"3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 (pseudo 
randomly generated) UUID. The UUID is generated using a cryptographically 
strong pseudo random number generator.
+
+   
 
 
   
@@ -1811,6 +1822,17 @@ randInteger(INTEGER1, INTEGER2)
 
  
{% highlight java %}
+uuid()
+{% endhighlight %}
+ 
+
+  Returns a UUID (Universally Unique Identifier) string (e.g., 
"3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 (pseudo 
randomly generated) UUID. The UUID is generated using a cryptographically 
strong pseudo random number generator.
+
+   
+
+
+ 
+   {% highlight java %}
 INTEGER.bin()
 {% endhighlight %}
  
@@ -2236,6 +2258,17 @@ randInteger(INTEGER1, INTEGER2)
 
  
{% highlight scala %}
+uuid()
+{% endhighlight %}
+ 
+
+  Returns a UUID (Universally Unique Identifier) string (e.g., 
"3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 (pseudo 
randomly generated) UUID. The UUID is generated using a cryptographically 
strong pseudo random number generator.
+
+   
+
+
+ 
+   {% highlight scala %}
 INTEGER.bin()
 {% endhighlight %}
  
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index 66e7544..dfe69cb 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -1256,4 +1256,23 @@ object concat_ws {
   }
 }
 
+/**
+  * Returns an UUID (Universally Unique Identifier) string (e.g.,
+  * "3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 
(pseudo randomly
+  * generated) UUID. The UUID is generated using a cryptographically strong 
pseudo random number
+  * generator.
+  */
+object uuid {
+
+  /**
+* Returns an UUID (Universally Unique Identifier) string (e.g.,
+* "3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 
(pseudo randomly
+* generated) UUID. The UUID is generated using a cryptographically strong 
pseudo random number
+* generator.
+*/
+  def apply(): Expression = {
+UUID()
+  }
+}
+
 // scalastyle:on object.name
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
index 942666a..7eb91d3 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/calls/BuiltInMethods.scala
@@ -139,4 +139,6 @@ object BuiltInMethods {
 
   val HEX_LONG: Method = Types.lookupMethod(classOf[ScalarFunctions], "hex", 
classOf[Long])
   val HEX_STRING: Method = Types.lookupMet
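
As a usage sketch (not part of the patch), the new function behaves like any other built-in scalar function in SQL. The table environment `tEnv`, the table `Users`, and its column `name` are assumptions:

{% highlight java %}
import org.apache.flink.table.api.Table;

// Attach a pseudo-randomly generated RFC 4122 type 4 UUID to every row.
Table withIds = tEnv.sqlQuery("SELECT UUID() AS id, name FROM Users");
{% endhighlight %}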

[flink] branch master updated: [hotfix] [docs] Fix typo in Elasticsearch example

2018-08-15 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new e91b2d5  [hotfix] [docs] Fix typo in Elasticsearch example
e91b2d5 is described below

commit e91b2d579085315e4f7b09f7a0c25886b5acc406
Author: Timo Walther 
AuthorDate: Wed Aug 15 13:54:03 2018 +0200

[hotfix] [docs] Fix typo in Elasticsearch example
---
 docs/dev/connectors/elasticsearch.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/dev/connectors/elasticsearch.md 
b/docs/dev/connectors/elasticsearch.md
index bafe391..927bb69 100644
--- a/docs/dev/connectors/elasticsearch.md
+++ b/docs/dev/connectors/elasticsearch.md
@@ -223,7 +223,7 @@ ElasticsearchSink.Builder esSinkBuilder = new 
ElasticsearchSink.Builder<
 builder.setBulkFlushMaxActions(1);
 
 // provide a RestClientFactory for custom configuration on the internally 
created REST client
-builder.setRestClientBuilder(
+builder.setRestClientFactory(
   restClientBuilder -> {
 restClientBuilder.setDefaultHeaders(...)
 restClientBuilder.setMaxRetryTimeoutMillis(...)
@@ -360,7 +360,7 @@ val esSinkBuilder = new ElasticsearchSink.Builer[String](
 builder.setBulkFlushMaxActions(1)
 
 // provide a RestClientFactory for custom configuration on the internally 
created REST client
-builder.setRestClientBuilder(
+builder.setRestClientFactory(
   restClientBuilder -> {
 restClientBuilder.setDefaultHeaders(...)
 restClientBuilder.setMaxRetryTimeoutMillis(...)
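
For context, a slightly more complete sketch of the corrected call (not taken from the documentation itself). Here `httpHosts` and `elasticsearchSinkFunction` are assumed to be set up as in the rest of the connector page, and the header value and timeout are placeholders:

{% highlight java %}
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;

ElasticsearchSink.Builder<String> esSinkBuilder =
    new ElasticsearchSink.Builder<>(httpHosts, elasticsearchSinkFunction);

esSinkBuilder.setBulkFlushMaxActions(1);

// The factory configures the internally created low-level REST client.
esSinkBuilder.setRestClientFactory(restClientBuilder -> {
    restClientBuilder.setDefaultHeaders(
        new Header[] {new BasicHeader("Content-Type", "application/json")});
    restClientBuilder.setMaxRetryTimeoutMillis(60_000);
});
{% endhighlight %}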



[flink] branch release-1.6 updated: [hotfix] [docs] Fix typo in Elasticsearch example

2018-08-15 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 0b1e4f3  [hotfix] [docs] Fix typo in Elasticsearch example
0b1e4f3 is described below

commit 0b1e4f3289d876cdb32395c6696c9340cfabb0b9
Author: Timo Walther 
AuthorDate: Wed Aug 15 13:54:03 2018 +0200

[hotfix] [docs] Fix typo in Elasticsearch example
---
 docs/dev/connectors/elasticsearch.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/dev/connectors/elasticsearch.md 
b/docs/dev/connectors/elasticsearch.md
index bafe391..927bb69 100644
--- a/docs/dev/connectors/elasticsearch.md
+++ b/docs/dev/connectors/elasticsearch.md
@@ -223,7 +223,7 @@ ElasticsearchSink.Builder esSinkBuilder = new 
ElasticsearchSink.Builder<
 builder.setBulkFlushMaxActions(1);
 
 // provide a RestClientFactory for custom configuration on the internally 
created REST client
-builder.setRestClientBuilder(
+builder.setRestClientFactory(
   restClientBuilder -> {
 restClientBuilder.setDefaultHeaders(...)
 restClientBuilder.setMaxRetryTimeoutMillis(...)
@@ -360,7 +360,7 @@ val esSinkBuilder = new ElasticsearchSink.Builer[String](
 builder.setBulkFlushMaxActions(1)
 
 // provide a RestClientFactory for custom configuration on the internally 
created REST client
-builder.setRestClientBuilder(
+builder.setRestClientFactory(
   restClientBuilder -> {
 restClientBuilder.setDefaultHeaders(...)
 restClientBuilder.setMaxRetryTimeoutMillis(...)



[flink] branch master updated: [FLINK-10169] [table] Fix error in RowtimeValidator when getting custom TimestampExtractor

2018-08-18 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 9fe70f2  [FLINK-10169] [table] Fix error in RowtimeValidator when 
getting custom TimestampExtractor
9fe70f2 is described below

commit 9fe70f2f4a0ed46df8ff7f75289ea81fd2f5a0f7
Author: jrthe42 
AuthorDate: Fri Aug 17 15:59:05 2018 +0800

[FLINK-10169] [table] Fix error in RowtimeValidator when getting custom 
TimestampExtractor

This closes #6575.
---
 .../flink/table/descriptors/RowtimeValidator.scala |  2 +-
 .../flink/table/descriptors/RowtimeTest.scala  | 59 --
 .../table/descriptors/SchemaValidatorTest.scala| 22 +++-
 3 files changed, 77 insertions(+), 6 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
index 160347e..4bd51c0 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
@@ -177,7 +177,7 @@ object RowtimeValidator {
 
   case ROWTIME_TIMESTAMPS_TYPE_VALUE_CUSTOM =>
 val clazz = properties.getClass(
-  ROWTIME_TIMESTAMPS_CLASS,
+  prefix + ROWTIME_TIMESTAMPS_CLASS,
   classOf[TimestampExtractor])
 DescriptorProperties.deserialize(
   properties.getString(prefix + ROWTIME_TIMESTAMPS_SERIALIZED),
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
index d5930fa..199d416 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
@@ -20,9 +20,12 @@ package org.apache.flink.table.descriptors
 
 import java.util
 
+import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.streaming.api.watermark.Watermark
-import org.apache.flink.table.api.ValidationException
-import org.apache.flink.table.descriptors.RowtimeTest.CustomAssigner
+import org.apache.flink.table.api.{Types, ValidationException}
+import org.apache.flink.table.descriptors.RowtimeTest.{CustomAssigner, 
CustomExtractor}
+import org.apache.flink.table.expressions.{Cast, Expression, 
ResolvedFieldReference}
+import org.apache.flink.table.sources.tsextractors.TimestampExtractor
 import org.apache.flink.table.sources.wmstrategies.PunctuatedWatermarkAssigner
 import org.apache.flink.types.Row
 import org.junit.Test
@@ -57,7 +60,11 @@ class RowtimeTest extends DescriptorTestBase {
   .timestampsFromSource()
   .watermarksFromStrategy(new CustomAssigner())
 
-util.Arrays.asList(desc1, desc2)
+val desc3 = Rowtime()
+  .timestampsFromExtractor(new CustomExtractor("tsField"))
+  .watermarksPeriodicBounded(1000L)
+
+util.Arrays.asList(desc1, desc2, desc3)
   }
 
   override def validator(): DescriptorValidator = {
@@ -83,7 +90,19 @@ class RowtimeTest extends DescriptorTestBase {
 "F0ZWd5mB_uSxDZ8-MCAAB4cA")
 )
 
-util.Arrays.asList(props1.asJava, props2.asJava)
+val props3 = Map(
+  "rowtime.timestamps.type" -> "custom",
+  "rowtime.timestamps.class" -> ("org.apache.flink.table.descriptors." +
+"RowtimeTest$CustomExtractor"),
+  "rowtime.timestamps.serialized" -> 
("rO0ABXNyAD5vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLmRlc2NyaXB0b3" +
+
"JzLlJvd3RpbWVUZXN0JEN1c3RvbUV4dHJhY3RvcoaChjMg55xwAgABTAAFZmllbGR0ABJMamF2YS9sYW5nL1N0cm"
 +
+
"luZzt4cgA-b3JnLmFwYWNoZS5mbGluay50YWJsZS5zb3VyY2VzLnRzZXh0cmFjdG9ycy5UaW1lc3RhbXBFeHRyYW"
 +
+"N0b3LU8E2thK4wMQIAAHhwdAAHdHNGaWVsZA"),
+  "rowtime.watermarks.type" -> "periodic-bounded",
+  "rowtime.watermarks.delay" -> "1000"
+)
+
+util.Arrays.asList(props1.asJava, props2.asJava, props3.asJava)
   }
 }
 
@@ -93,4 +112,36 @@ object RowtimeTest {
 override def getWatermark(row: Row, timestamp: Long): Watermark =
   throw new UnsupportedOperationException()
   }
+
+  class CustomExtractor(val field: String) extends TimestampExtractor {
+def this() = {
+  this("ts")
+}
+
+override def getArgumentFields: Array[String] = Array(field)
+
+override def validateArgumentFields(argumentFieldTypes: 
Array[TypeInformation[_]]): Unit = {
+  argumentFieldTypes(0) match {
+case Types.SQL_TIMESTAMP =&g
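
A rough Java rendering of the new test case (a sketch, not part of the commit): `MyTimestampExtractor` stands in for a user-defined `TimestampExtractor`, analogous to the `CustomExtractor` added in the test above.

{% highlight java %}
import org.apache.flink.table.descriptors.Rowtime;

// Declare a rowtime attribute backed by a custom TimestampExtractor and a
// bounded-out-of-orderness watermark strategy with a 1 s delay.
Rowtime rowtime = new Rowtime()
    .timestampsFromExtractor(new MyTimestampExtractor("tsField"))
    .watermarksPeriodicBounded(1000L);
{% endhighlight %}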

[flink] branch release-1.6 updated: [FLINK-10169] [table] Fix error in RowtimeValidator when getting custom TimestampExtractor

2018-08-18 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 50c0fb8  [FLINK-10169] [table] Fix error in RowtimeValidator when 
getting custom TimestampExtractor
50c0fb8 is described below

commit 50c0fb8d22cc8e9b58bfda1cb4381f7b3e34a901
Author: jrthe42 
AuthorDate: Fri Aug 17 15:59:05 2018 +0800

[FLINK-10169] [table] Fix error in RowtimeValidator when getting custom 
TimestampExtractor

This closes #6575.
---
 .../flink/table/descriptors/RowtimeValidator.scala |  2 +-
 .../flink/table/descriptors/RowtimeTest.scala  | 59 --
 .../table/descriptors/SchemaValidatorTest.scala| 22 +++-
 3 files changed, 77 insertions(+), 6 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
index 160347e..4bd51c0 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/RowtimeValidator.scala
@@ -177,7 +177,7 @@ object RowtimeValidator {
 
   case ROWTIME_TIMESTAMPS_TYPE_VALUE_CUSTOM =>
 val clazz = properties.getClass(
-  ROWTIME_TIMESTAMPS_CLASS,
+  prefix + ROWTIME_TIMESTAMPS_CLASS,
   classOf[TimestampExtractor])
 DescriptorProperties.deserialize(
   properties.getString(prefix + ROWTIME_TIMESTAMPS_SERIALIZED),
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
index d5930fa..199d416 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/RowtimeTest.scala
@@ -20,9 +20,12 @@ package org.apache.flink.table.descriptors
 
 import java.util
 
+import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.streaming.api.watermark.Watermark
-import org.apache.flink.table.api.ValidationException
-import org.apache.flink.table.descriptors.RowtimeTest.CustomAssigner
+import org.apache.flink.table.api.{Types, ValidationException}
+import org.apache.flink.table.descriptors.RowtimeTest.{CustomAssigner, 
CustomExtractor}
+import org.apache.flink.table.expressions.{Cast, Expression, 
ResolvedFieldReference}
+import org.apache.flink.table.sources.tsextractors.TimestampExtractor
 import org.apache.flink.table.sources.wmstrategies.PunctuatedWatermarkAssigner
 import org.apache.flink.types.Row
 import org.junit.Test
@@ -57,7 +60,11 @@ class RowtimeTest extends DescriptorTestBase {
   .timestampsFromSource()
   .watermarksFromStrategy(new CustomAssigner())
 
-util.Arrays.asList(desc1, desc2)
+val desc3 = Rowtime()
+  .timestampsFromExtractor(new CustomExtractor("tsField"))
+  .watermarksPeriodicBounded(1000L)
+
+util.Arrays.asList(desc1, desc2, desc3)
   }
 
   override def validator(): DescriptorValidator = {
@@ -83,7 +90,19 @@ class RowtimeTest extends DescriptorTestBase {
 "F0ZWd5mB_uSxDZ8-MCAAB4cA")
 )
 
-util.Arrays.asList(props1.asJava, props2.asJava)
+val props3 = Map(
+  "rowtime.timestamps.type" -> "custom",
+  "rowtime.timestamps.class" -> ("org.apache.flink.table.descriptors." +
+"RowtimeTest$CustomExtractor"),
+  "rowtime.timestamps.serialized" -> 
("rO0ABXNyAD5vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLmRlc2NyaXB0b3" +
+
"JzLlJvd3RpbWVUZXN0JEN1c3RvbUV4dHJhY3RvcoaChjMg55xwAgABTAAFZmllbGR0ABJMamF2YS9sYW5nL1N0cm"
 +
+
"luZzt4cgA-b3JnLmFwYWNoZS5mbGluay50YWJsZS5zb3VyY2VzLnRzZXh0cmFjdG9ycy5UaW1lc3RhbXBFeHRyYW"
 +
+"N0b3LU8E2thK4wMQIAAHhwdAAHdHNGaWVsZA"),
+  "rowtime.watermarks.type" -> "periodic-bounded",
+  "rowtime.watermarks.delay" -> "1000"
+)
+
+util.Arrays.asList(props1.asJava, props2.asJava, props3.asJava)
   }
 }
 
@@ -93,4 +112,36 @@ object RowtimeTest {
 override def getWatermark(row: Row, timestamp: Long): Watermark =
   throw new UnsupportedOperationException()
   }
+
+  class CustomExtractor(val field: String) extends TimestampExtractor {
+def this() = {
+  this("ts")
+}
+
+override def getArgumentFields: Array[String] = Array(field)
+
+override def validateArgumentFields(argumentFieldTypes: 
Array[TypeInformation[_]]): Unit = {
+  argumentFieldTypes(0) matc

[flink] branch master updated: [hotfix][docs] Add missing </td> to metrics page

2018-08-23 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 1de8600  [hotfix][docs] Add missing </td> to metrics page
1de8600 is described below

commit 1de86006b0bc9bfc4c6bbc44910d0852b3c6f91c
Author: David Anderson 
AuthorDate: Thu Aug 23 19:13:45 2018 +0200

[hotfix][docs] Add missing </td> to metrics page

This closes #6609.
---
 docs/monitoring/metrics.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index a93853c..7d88a36 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1486,6 +1486,7 @@ Thus, in order to infer the metric identifier:
   bytesRequestedPerFetch
   stream, shardId
   The bytes requested (2 Mbps / loopFrequencyHz) in a single call to 
getRecords.
+  </td>
   Gauge
 
   



[flink] branch release-1.6 updated: [hotfix][docs] Add missing </td> to metrics page

2018-08-23 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new dd76ede  [hotfix][docs] Add missing </td> to metrics page
dd76ede is described below

commit dd76ede4e1a4126c7c547eeba650d12b2df4d836
Author: David Anderson 
AuthorDate: Thu Aug 23 19:13:45 2018 +0200

[hotfix][docs] Add missing </td> to metrics page

This closes #6609.
---
 docs/monitoring/metrics.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/monitoring/metrics.md b/docs/monitoring/metrics.md
index 89a524c..d785258 100644
--- a/docs/monitoring/metrics.md
+++ b/docs/monitoring/metrics.md
@@ -1487,6 +1487,7 @@ Thus, in order to infer the metric identifier:
   bytesRequestedPerFetch
   stream, shardId
   The bytes requested (2 Mbps / loopFrequencyHz) in a single call to 
getRecords.
+  </td>
   Gauge
 
   



[flink] branch master updated: [FLINK-10163] [sql-client] Support views in SQL Client

2018-08-28 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 669eb5f  [FLINK-10163] [sql-client] Support views in SQL Client
669eb5f is described below

commit 669eb5f7c3bb877ede08960d58279d4bc5503b0d
Author: Timo Walther 
AuthorDate: Thu Aug 23 07:03:54 2018 +0200

[FLINK-10163] [sql-client] Support views in SQL Client

Adds initial support for views in SQL Client. It adds the following 
statements:

CREATE VIEW: Creates a virtual table from a SQL query.
SHOW VIEW: Describes a previously created virtual table.
DROP VIEW: Deletes a previously created virtual table.

It also adds the section 'views' to environment files.

This closes #6606.
---
 docs/dev/table/sqlClient.md|  51 +-
 .../flink-sql-client/conf/sql-client-defaults.yaml |  13 +-
 .../org/apache/flink/table/client/SqlClient.java   |  17 +-
 .../apache/flink/table/client/cli/CliClient.java   | 148 +++-
 .../apache/flink/table/client/cli/CliStrings.java  |  17 +-
 .../flink/table/client/cli/SqlCommandParser.java   | 190 ++---
 .../flink/table/client/config/ConfigUtil.java  |  16 ++
 .../flink/table/client/config/Environment.java |  63 +--
 .../table/client/config/UserDefinedFunction.java   |  17 +-
 .../flink/table/client/gateway/Executor.java   |   5 +
 .../flink/table/client/gateway/SessionContext.java |  37 +++-
 .../client/gateway/local/ExecutionContext.java |  13 ++
 .../table/client/gateway/local/LocalExecutor.java  |   6 +
 .../flink/table/client/cli/CliClientTest.java  |   5 +
 .../table/client/cli/SqlCommandParserTest.java |  93 ++
 .../client/gateway/local/ExecutionContextTest.java |   9 +-
 .../client/gateway/local/LocalExecutorITCase.java  |  65 ++-
 .../test/resources/test-sql-client-defaults.yaml   |   6 +
 18 files changed, 622 insertions(+), 149 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 0e8d2d6..f16f1a5 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -162,7 +162,7 @@ A SQL query needs a configuration environment in which it 
is executed. The so-ca
 Every environment file is a regular [YAML file](http://yaml.org/). An example 
of such a file is presented below.
 
 {% highlight yaml %}
-# Define table sources here.
+# Define table sources and sinks here.
 
 tables:
   - name: MyTableSource
@@ -185,6 +185,12 @@ tables:
   - name: MyField2
 type: VARCHAR
 
+# Define table views here.
+
+views:
+  - name: MyCustomView
+query: "SELECT MyField2 FROM MyTableSource"
+
 # Define user-defined functions here.
 
 functions:
@@ -217,7 +223,8 @@ deployment:
 
 This configuration:
 
-- defines an environment with a table source `MyTableName` that reads from a 
CSV file,
+- defines an environment with a table source `MyTableSource` that reads from a 
CSV file,
+- defines a view `MyCustomView` that declares a virtual table using a SQL 
query,
 - defines a user-defined function `myUDF` that can be instantiated using the 
class name and two constructor parameters,
 - specifies a parallelism of 1 for queries executed in this streaming 
environment,
 - specifies an event-time characteristic, and
@@ -404,7 +411,7 @@ This process can be recursively performed until all the 
constructor parameters a
 {% top %}
 
 Detached SQL Queries
-
+
 
 In order to define end-to-end SQL pipelines, SQL's `INSERT INTO` statement can 
be used for submitting long-running, detached queries to a Flink cluster. These 
queries produce their results into an external system instead of the SQL 
Client. This allows for dealing with higher parallelism and larger amounts of 
data. The CLI itself does not have any control over a detached query after 
submission.
 
@@ -459,6 +466,44 @@ Web interface: http://localhost:8081
 
 {% top %}
 
+SQL Views
+-
+
+Views allow for defining virtual tables from SQL queries. The view definition is 
parsed and validated immediately. However, the actual execution happens when 
the view is accessed during the submission of a general `INSERT INTO` or 
`SELECT` statement.
+
+Views can either be defined in [environment 
files](sqlClient.html#environment-files) or within the CLI session.
+
+The following example shows how to define multiple views in a file:
+
+{% highlight yaml %}
+views:
+  - name: MyRestrictedView
+query: "SELECT MyField2 FROM MyTableSource"
+  - name: MyComplexView
+query: >
+  SELECT MyField2 + 42, CAST(MyField1 AS VARCHAR)
+  FROM MyTableSource
+  WHERE MyField2 > 200
+{% endhighlight %}
+
+Similar to table sources and sinks, views defined in a session environment 
file have the highest precedence.
+
+Views can a

[flink] branch master updated: [FLINK-10192] [sql-client] Fix SQL Client table visualization mode

2018-08-28 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new d78bb60  [FLINK-10192] [sql-client] Fix SQL Client table visualization 
mode
d78bb60 is described below

commit d78bb60715044077eb4267ad4b171616e94d90e3
Author: Timo Walther 
AuthorDate: Fri Aug 24 12:13:45 2018 +0200

[FLINK-10192] [sql-client] Fix SQL Client table visualization mode

Fixes the wrong materialization for the debugging visualization
in table mode. Reworks the caching mechanism in 
MaterializedCollectStreamResult.

This closes #6617.
---
 .../flink/table/client/gateway/TypedResult.java|  19 
 .../result/MaterializedCollectStreamResult.java|  51 +
 .../MaterializedCollectStreamResultTest.java   | 114 +
 3 files changed, 162 insertions(+), 22 deletions(-)

diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
index ee4e8d3..6ef8ef3 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.table.client.gateway;
 
+import java.util.Objects;
+
 /**
  * Result with an attached type (actual payload, EOS, etc.).
  *
@@ -55,6 +57,23 @@ public class TypedResult {
return "TypedResult<" + type + ">";
}
 
+   @Override
+   public boolean equals(Object o) {
+   if (this == o) {
+   return true;
+   }
+   if (o == null || getClass() != o.getClass()) {
+   return false;
+   }
+   TypedResult that = (TypedResult) o;
+   return type == that.type && Objects.equals(payload, 
that.payload);
+   }
+
+   @Override
+   public int hashCode() {
+   return Objects.hash(type, payload);
+   }
+
// 

 
public static  TypedResult empty() {
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
index 7321bd0..45c4f75 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
@@ -39,10 +39,20 @@ import java.util.Map;
 public class MaterializedCollectStreamResult extends CollectStreamResult 
implements MaterializedResult {
 
private final List materializedTable;
-   private final Map> rowPositions; // positions of 
rows in table for faster access
+
+   /**
+* Caches the last row position for faster access. The position might 
not be exact (if rows
+* with smaller position are deleted) nor complete (for deletes of 
duplicates). However, the
+* cache narrows the search in the materialized table.
+*/
+   private final Map rowPositionCache;
+
private final List snapshot;
+
private int pageCount;
+
private int pageSize;
+
private boolean isLastSnapshot;
 
public MaterializedCollectStreamResult(TypeInformation outputType, 
ExecutionConfig config,
@@ -51,7 +61,7 @@ public class MaterializedCollectStreamResult extends 
CollectStreamResult i
 
// prepare for materialization
materializedTable = new ArrayList<>();
-   rowPositions = new HashMap<>();
+   rowPositionCache = new HashMap<>();
snapshot = new ArrayList<>();
isLastSnapshot = false;
pageCount = 0;
@@ -101,32 +111,29 @@ public class MaterializedCollectStreamResult extends 
CollectStreamResult i
 
@Override
protected void processRecord(Tuple2 change) {
-   // we track the position of rows for faster access and in order 
to return consistent
-   // snapshots where new rows are appended at the end
synchronized (resultLock) {
-   final List positions = 
rowPositions.get(change.f1);
-
+   final Row row = change.f1;
// insert
if (change.f0) {
- 

[flink] branch release-1.6 updated: [FLINK-10192] [sql-client] Fix SQL Client table visualization mode

2018-08-28 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new baa1a2a  [FLINK-10192] [sql-client] Fix SQL Client table visualization 
mode
baa1a2a is described below

commit baa1a2a8f79f907cc89033c7ccbbd675dae68839
Author: Timo Walther 
AuthorDate: Fri Aug 24 12:13:45 2018 +0200

[FLINK-10192] [sql-client] Fix SQL Client table visualization mode

Fixes the wrong materialization for the debugging visualization
in table mode. Reworks the caching mechanism in 
MaterializedCollectStreamResult.

This closes #6617.
---
 .../flink/table/client/gateway/TypedResult.java|  19 
 .../result/MaterializedCollectStreamResult.java|  51 +
 .../MaterializedCollectStreamResultTest.java   | 114 +
 3 files changed, 162 insertions(+), 22 deletions(-)

diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
index ee4e8d3..6ef8ef3 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/TypedResult.java
@@ -18,6 +18,8 @@
 
 package org.apache.flink.table.client.gateway;
 
+import java.util.Objects;
+
 /**
  * Result with an attached type (actual payload, EOS, etc.).
  *
@@ -55,6 +57,23 @@ public class TypedResult {
return "TypedResult<" + type + ">";
}
 
+   @Override
+   public boolean equals(Object o) {
+   if (this == o) {
+   return true;
+   }
+   if (o == null || getClass() != o.getClass()) {
+   return false;
+   }
+   TypedResult that = (TypedResult) o;
+   return type == that.type && Objects.equals(payload, 
that.payload);
+   }
+
+   @Override
+   public int hashCode() {
+   return Objects.hash(type, payload);
+   }
+
// 

 
public static  TypedResult empty() {
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
index 7321bd0..45c4f75 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/result/MaterializedCollectStreamResult.java
@@ -39,10 +39,20 @@ import java.util.Map;
 public class MaterializedCollectStreamResult extends CollectStreamResult 
implements MaterializedResult {
 
private final List materializedTable;
-   private final Map> rowPositions; // positions of 
rows in table for faster access
+
+   /**
+* Caches the last row position for faster access. The position might 
not be exact (if rows
+* with smaller position are deleted) nor complete (for deletes of 
duplicates). However, the
+* cache narrows the search in the materialized table.
+*/
+   private final Map rowPositionCache;
+
private final List snapshot;
+
private int pageCount;
+
private int pageSize;
+
private boolean isLastSnapshot;
 
public MaterializedCollectStreamResult(TypeInformation outputType, 
ExecutionConfig config,
@@ -51,7 +61,7 @@ public class MaterializedCollectStreamResult extends 
CollectStreamResult i
 
// prepare for materialization
materializedTable = new ArrayList<>();
-   rowPositions = new HashMap<>();
+   rowPositionCache = new HashMap<>();
snapshot = new ArrayList<>();
isLastSnapshot = false;
pageCount = 0;
@@ -101,32 +111,29 @@ public class MaterializedCollectStreamResult extends 
CollectStreamResult i
 
@Override
protected void processRecord(Tuple2 change) {
-   // we track the position of rows for faster access and in order 
to return consistent
-   // snapshots where new rows are appended at the end
synchronized (resultLock) {
-   final List positions = 
rowPositions.get(change.f1);
-
+   final Row row = change.f1;
// insert
if (change.f0) {
- 

[flink] branch master updated: [FLINK-8686] [sql-client] Limit result size for prototyping modes

2018-08-29 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 90ada01  [FLINK-8686] [sql-client] Limit result size for prototyping 
modes
90ada01 is described below

commit 90ada01a3ebd1039feb150c48771903aa8e98eab
Author: Timo Walther 
AuthorDate: Mon Aug 27 15:02:59 2018 +0200

[FLINK-8686] [sql-client] Limit result size for prototyping modes

This makes the SQL Client more robust by limiting the result size for
both changelog and table result modes. In changelog mode the
responsiveness of the CLI is the limiting factor. This adds a hard
limit for changelog mode. In table mode only the main memory is the
limiting factor and the configurable maximum row count. This adds a
configurable limit for table mode.

This closes #6621.
---
 docs/dev/table/sqlClient.md|   7 +-
 .../flink-sql-client/conf/sql-client-defaults.yaml |   4 +-
 .../table/client/cli/CliChangelogResultView.java   |  12 +-
 .../flink/table/client/cli/CliResultView.java  |  14 +-
 .../flink/table/client/config/Execution.java   |   4 +
 .../flink/table/client/config/PropertyStrings.java |   2 +
 .../table/client/gateway/local/ResultStore.java|   7 +-
 .../result/MaterializedCollectStreamResult.java| 170 ++---
 .../client/gateway/local/LocalExecutorITCase.java  |  73 ++---
 .../MaterializedCollectStreamResultTest.java   |  87 ++-
 .../test/resources/test-sql-client-defaults.yaml   |   1 +
 11 files changed, 319 insertions(+), 62 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index f16f1a5..8c4ba83 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -106,7 +106,7 @@ Alice, 1
 Greg, 1
 {% endhighlight %}
 
-Both result modes can be useful during the prototyping of SQL queries.
+Both result modes can be useful during the prototyping of SQL queries. In both 
modes, results are stored in the Java heap memory of the SQL Client. In order 
to keep the CLI interface responsive, the changelog mode only shows the latest 
1000 changes. The table mode allows for navigating through bigger results that 
are only limited by the available main memory and the configured [maximum 
number of rows](sqlClient.html#configuration) (`max-table-result-rows`).
 
 Attention Queries that are executed in 
a batch environment, can only be retrieved using the `table` result mode.
 
@@ -167,6 +167,7 @@ Every environment file is a regular [YAML 
file](http://yaml.org/). An example of
 tables:
   - name: MyTableSource
 type: source
+update-mode: append
 connector:
   type: filesystem
   path: "/path/to/something.csv"
@@ -206,6 +207,8 @@ functions:
 execution:
   type: streaming   # required: execution mode either 'batch' 
or 'streaming'
   result-mode: table# required: either 'table' or 'changelog'
+  max-table-result-rows: 100# optional: maximum number of maintained 
rows in
+#   'table' mode (100 by default, 
smaller 1 means unlimited)
   time-characteristic: event-time   # optional: 'processing-time' or 
'event-time' (default)
   parallelism: 1# optional: Flink's parallelism (1 by 
default)
   periodic-watermarks-interval: 200 # optional: interval for periodic 
watermarks (200 ms by default)
@@ -213,7 +216,7 @@ execution:
   min-idle-state-retention: 0   # optional: table program's minimum idle 
state time
   max-idle-state-retention: 0   # optional: table program's maximum idle 
state time
   restart-strategy: # optional: restart strategy
-type: fallback  #   "fallback" to global restart 
strategy by default
+type: fallback  #   "fallback" to global restart strategy 
by default
 
 # Deployment properties allow for describing the cluster to which table 
programs are submitted to.
 
diff --git a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml 
b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
index 302651a..97e89fd 100644
--- a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
+++ b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
@@ -76,7 +76,9 @@ execution:
   # interval in ms for emitting periodic watermarks
   periodic-watermarks-interval: 200
   # 'changelog' or 'table' presentation of results
-  result-mode: changelog
+  result-mode: table
+  # maximum number of maintained rows in 'table' presentation of results
+  max-table-result-rows: 100
   # parallelism of the program
   parallelism: 1
   # maximum parallelism
diff --

[flink] branch release-1.6 updated: [FLINK-8686] [sql-client] Limit result size for prototyping modes

2018-08-29 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 0f80283  [FLINK-8686] [sql-client] Limit result size for prototyping 
modes
0f80283 is described below

commit 0f8028378fffceecebaa023e1b17753dbfa280bc
Author: Timo Walther 
AuthorDate: Mon Aug 27 15:02:59 2018 +0200

[FLINK-8686] [sql-client] Limit result size for prototyping modes

This makes the SQL Client more robust by limiting the result size for
both changelog and table result modes. In changelog mode the
responsiveness of the CLI is the limiting factor. This adds a hard
limit for changelog mode. In table mode only the main memory is the
limiting factor and the configurable maximum row count. This adds a
configurable limit for table mode.

This closes #6621.
---
 docs/dev/table/sqlClient.md|   6 +-
 .../flink-sql-client/conf/sql-client-defaults.yaml |   4 +-
 .../table/client/cli/CliChangelogResultView.java   |  12 +-
 .../flink/table/client/cli/CliResultView.java  |  14 +-
 .../flink/table/client/config/Execution.java   |   4 +
 .../flink/table/client/config/PropertyStrings.java |   2 +
 .../table/client/gateway/local/ResultStore.java|   7 +-
 .../result/MaterializedCollectStreamResult.java| 170 ++---
 .../client/gateway/local/LocalExecutorITCase.java  |  73 ++---
 .../MaterializedCollectStreamResultTest.java   |  87 ++-
 .../test/resources/test-sql-client-defaults.yaml   |   1 +
 11 files changed, 319 insertions(+), 61 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index d35aa59..ed36354 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -106,7 +106,7 @@ Alice, 1
 Greg, 1
 {% endhighlight %}
 
-Both result modes can be useful during the prototyping of SQL queries.
+Both result modes can be useful during the prototyping of SQL queries. In both 
modes, results are stored in the Java heap memory of the SQL Client. In order 
to keep the CLI interface responsive, the changelog mode only shows the latest 
1000 changes. The table mode allows for navigating through bigger results that 
are only limited by the available main memory and the configured [maximum 
number of rows](sqlClient.html#configuration) (`max-table-result-rows`).
 
 After a query is defined, it can be submitted to the cluster as a 
long-running, detached Flink job. For this, a target system that stores the 
results needs to be specified using the [INSERT INTO 
statement](sqlClient.html#detached-sql-queries). The [configuration 
section](sqlClient.html#configuration) explains how to declare table sources 
for reading data, how to declare table sinks for writing data, and how to 
configure other table program properties.
 
@@ -165,6 +165,7 @@ Every environment file is a regular [YAML 
file](http://yaml.org/). An example of
 tables:
   - name: MyTableSource
 type: source
+update-mode: append
 connector:
   type: filesystem
   path: "/path/to/something.csv"
@@ -198,6 +199,9 @@ functions:
 execution:
   type: streaming   # required: execution mode either 'batch' 
or 'streaming'
   result-mode: table# required: either 'table' or 'changelog'
+  max-table-result-rows: 100# optional: maximum number of maintained 
rows in
+#   'table' mode (100 by default, 
smaller 1 means unlimited)
+#   (from Flink 1.6.1)
   time-characteristic: event-time   # optional: 'processing-time' or 
'event-time' (default)
   parallelism: 1# optional: Flink's parallelism (1 by 
default)
   periodic-watermarks-interval: 200 # optional: interval for periodic 
watermarks (200 ms by default)
diff --git a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml 
b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
index 51e6e95..6dc5485 100644
--- a/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
+++ b/flink-libraries/flink-sql-client/conf/sql-client-defaults.yaml
@@ -65,7 +65,9 @@ execution:
   # interval in ms for emitting periodic watermarks
   periodic-watermarks-interval: 200
   # 'changelog' or 'table' presentation of results
-  result-mode: changelog
+  result-mode: table
+  # maximum number of maintained rows in 'table' presentation of results
+  max-table-result-rows: 100
   # parallelism of the program
   parallelism: 1
   # maximum parallelism
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliChangelogResultView.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink

[flink] branch master updated: [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

2018-09-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new d036417  [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause
d036417 is described below

commit d036417985d3e2b1ca63909007db9710e842abf4
Author: xueyu <278006...@qq.com>
AuthorDate: Mon Sep 3 14:06:29 2018 +0800

[FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

This closes #6648.
---
 .../apache/flink/table/api/TableEnvironment.scala  |  4 +--
 .../stream/sql/validation/SortValidationTest.scala |  1 -
 .../table/runtime/stream/sql/SortITCase.scala  | 36 --
 .../table/utils/MemoryTableSourceSinkUtil.scala|  2 ++
 4 files changed, 38 insertions(+), 5 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
index 37f6d02..195812d 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
@@ -711,10 +711,10 @@ abstract class TableEnvironment(val config: TableConfig) {
   case insert: SqlInsert =>
 // validate the SQL query
 val query = insert.getSource
-planner.validate(query)
+val validatedQuery = planner.validate(query)
 
 // get query result as Table
-val queryResult = new Table(this, 
LogicalRelNode(planner.rel(query).rel))
+val queryResult = new Table(this, 
LogicalRelNode(planner.rel(validatedQuery).rel))
 
 // get name of sink table
 val targetTableName = 
insert.getTargetTable.asInstanceOf[SqlIdentifier].names.get(0)
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
index 083ed94..6c477fd 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
@@ -38,7 +38,6 @@ class SortValidationTest extends TableTestBase {
 streamUtil.verifySql(sqlQuery, "")
   }
 
-
   // test should fail because time is not the primary order field
   @Test(expected = classOf[TableException])
   def testSortProcessingTimeSecondaryField(): Unit = {
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
index 19db2a0..e7b79a5 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
@@ -18,15 +18,17 @@
 
 package org.apache.flink.table.runtime.stream.sql
 
+import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.api.scala._
 import org.apache.flink.streaming.api.TimeCharacteristic
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
 import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
-import org.apache.flink.table.api.TableEnvironment
+import org.apache.flink.table.api.{TableEnvironment, Types}
 import org.apache.flink.table.api.scala._
 import 
org.apache.flink.table.runtime.utils.TimeTestUtil.EventTimeSourceFunction
 import 
org.apache.flink.table.runtime.stream.sql.SortITCase.StringRowSelectorSink
-import org.apache.flink.table.runtime.utils.{StreamITCase, 
StreamingWithStateTestBase}
+import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, 
StreamingWithStateTestBase}
+import org.apache.flink.table.utils.MemoryTableSourceSinkUtil
 import org.apache.flink.types.Row
 import org.junit.Assert._
 import org.junit._
@@ -105,6 +107,36 @@ class SortITCase extends StreamingWithStateTestBase {
   "20")
 assertEquals(expected, SortITCase.testResults)
   }
+
+  @Test
+  def testInsertIntoMemoryTableOrderBy(): Unit = {
+val env = StreamExecutionEnvironment.getExecutionEnvironment
+env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
+val tEnv = TableEnvironment.getTableEnvironment(env)
+MemoryTableSourceSinkUtil.clear()
+
+val t = StreamTestData.getSmall3TupleDataStream(env)
+  .assignAscendingTimestamps(x => x._2)
+  .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
+tEnv.registerTable("sourceTable", t)
+
+val fieldNames = Array("d

[flink] branch release-1.6 updated: [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

2018-09-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new ddc2a98  [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause
ddc2a98 is described below

commit ddc2a987b07806ecaa748866353f34f1e3c5f0a6
Author: xueyu <278006...@qq.com>
AuthorDate: Mon Sep 3 14:06:29 2018 +0800

[FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

This closes #6648.
---
 .../apache/flink/table/api/TableEnvironment.scala  |  4 +--
 .../stream/sql/validation/SortValidationTest.scala |  1 -
 .../table/runtime/stream/sql/SortITCase.scala  | 36 --
 .../table/utils/MemoryTableSourceSinkUtil.scala|  2 ++
 4 files changed, 38 insertions(+), 5 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
index 37f6d02..195812d 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
@@ -711,10 +711,10 @@ abstract class TableEnvironment(val config: TableConfig) {
   case insert: SqlInsert =>
 // validate the SQL query
 val query = insert.getSource
-planner.validate(query)
+val validatedQuery = planner.validate(query)
 
 // get query result as Table
-val queryResult = new Table(this, 
LogicalRelNode(planner.rel(query).rel))
+val queryResult = new Table(this, 
LogicalRelNode(planner.rel(validatedQuery).rel))
 
 // get name of sink table
 val targetTableName = 
insert.getTargetTable.asInstanceOf[SqlIdentifier].names.get(0)
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
index 083ed94..6c477fd 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
@@ -38,7 +38,6 @@ class SortValidationTest extends TableTestBase {
 streamUtil.verifySql(sqlQuery, "")
   }
 
-
   // test should fail because time is not the primary order field
   @Test(expected = classOf[TableException])
   def testSortProcessingTimeSecondaryField(): Unit = {
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
index 19db2a0..e7b79a5 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
@@ -18,15 +18,17 @@
 
 package org.apache.flink.table.runtime.stream.sql
 
+import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.api.scala._
 import org.apache.flink.streaming.api.TimeCharacteristic
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
 import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
-import org.apache.flink.table.api.TableEnvironment
+import org.apache.flink.table.api.{TableEnvironment, Types}
 import org.apache.flink.table.api.scala._
 import 
org.apache.flink.table.runtime.utils.TimeTestUtil.EventTimeSourceFunction
 import 
org.apache.flink.table.runtime.stream.sql.SortITCase.StringRowSelectorSink
-import org.apache.flink.table.runtime.utils.{StreamITCase, 
StreamingWithStateTestBase}
+import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, 
StreamingWithStateTestBase}
+import org.apache.flink.table.utils.MemoryTableSourceSinkUtil
 import org.apache.flink.types.Row
 import org.junit.Assert._
 import org.junit._
@@ -105,6 +107,36 @@ class SortITCase extends StreamingWithStateTestBase {
   "20")
 assertEquals(expected, SortITCase.testResults)
   }
+
+  @Test
+  def testInsertIntoMemoryTableOrderBy(): Unit = {
+val env = StreamExecutionEnvironment.getExecutionEnvironment
+env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
+val tEnv = TableEnvironment.getTableEnvironment(env)
+MemoryTableSourceSinkUtil.clear()
+
+val t = StreamTestData.getSmall3TupleDataStream(env)
+  .assignAscendingTimestamps(x => x._2)
+  .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
+tEnv.registerTable("sourceTable", t)
+
+val fieldNames = A

[flink] branch release-1.5 updated: [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

2018-09-06 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.5
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.5 by this push:
 new a1cc687  [FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause
a1cc687 is described below

commit a1cc687f0bdcb6708d12e793b35e6f2a9674a106
Author: xueyu <278006...@qq.com>
AuthorDate: Mon Sep 3 14:06:29 2018 +0800

[FLINK-10261] [table] Fix INSERT INTO with ORDER BY clause

This closes #6648.
---
 .../apache/flink/table/api/TableEnvironment.scala  |  4 +--
 .../stream/sql/validation/SortValidationTest.scala |  1 -
 .../table/runtime/stream/sql/SortITCase.scala  | 37 --
 .../flink/table/utils/MemoryTableSinkUtil.scala|  2 ++
 4 files changed, 38 insertions(+), 6 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
index d6106be..03ae977 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/TableEnvironment.scala
@@ -669,10 +669,10 @@ abstract class TableEnvironment(val config: TableConfig) {
   case insert: SqlInsert =>
 // validate the SQL query
 val query = insert.getSource
-planner.validate(query)
+val validatedQuery = planner.validate(query)
 
 // get query result as Table
-val queryResult = new Table(this, 
LogicalRelNode(planner.rel(query).rel))
+val queryResult = new Table(this, 
LogicalRelNode(planner.rel(validatedQuery).rel))
 
 // get name of sink table
 val targetTableName = 
insert.getTargetTable.asInstanceOf[SqlIdentifier].names.get(0)
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
index 083ed94..6c477fd 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/sql/validation/SortValidationTest.scala
@@ -38,7 +38,6 @@ class SortValidationTest extends TableTestBase {
 streamUtil.verifySql(sqlQuery, "")
   }
 
-
   // test should fail because time is not the primary order field
   @Test(expected = classOf[TableException])
   def testSortProcessingTimeSecondaryField(): Unit = {
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
index 19db2a0..12ab81c 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/sql/SortITCase.scala
@@ -18,15 +18,17 @@
 
 package org.apache.flink.table.runtime.stream.sql
 
+import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.api.scala._
 import org.apache.flink.streaming.api.TimeCharacteristic
 import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
 import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
-import org.apache.flink.table.api.TableEnvironment
 import org.apache.flink.table.api.scala._
-import 
org.apache.flink.table.runtime.utils.TimeTestUtil.EventTimeSourceFunction
+import org.apache.flink.table.api.{TableEnvironment, Types}
 import 
org.apache.flink.table.runtime.stream.sql.SortITCase.StringRowSelectorSink
-import org.apache.flink.table.runtime.utils.{StreamITCase, 
StreamingWithStateTestBase}
+import 
org.apache.flink.table.runtime.utils.TimeTestUtil.EventTimeSourceFunction
+import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, 
StreamingWithStateTestBase}
+import org.apache.flink.table.utils.MemoryTableSinkUtil
 import org.apache.flink.types.Row
 import org.junit.Assert._
 import org.junit._
@@ -105,6 +107,35 @@ class SortITCase extends StreamingWithStateTestBase {
   "20")
 assertEquals(expected, SortITCase.testResults)
   }
+
+  @Test
+  def testInsertIntoMemoryTableOrderBy(): Unit = {
+val env = StreamExecutionEnvironment.getExecutionEnvironment
+env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
+val tEnv = TableEnvironment.getTableEnvironment(env)
+
+val t = StreamTestData.getSmall3TupleDataStream(env)
+  .assignAscendingTimestamps(x => x._2)
+  .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
+tEnv.registerTable("sourceTable",

[flink] branch master updated: [FLINK-10174] [table] Define UTF-8 charset for HEX, TO_BASE64, FROM_BASE64

2018-09-07 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 99bf3a5  [FLINK-10174] [table] Define UTF-8 charset for HEX, 
TO_BASE64, FROM_BASE64
99bf3a5 is described below

commit 99bf3a5087ac80ff20c8d22adef82f30e53ba568
Author: xueyu <278006...@qq.com>
AuthorDate: Mon Aug 20 15:16:42 2018 +0800

[FLINK-10174] [table] Define UTF-8 charset for HEX, TO_BASE64, FROM_BASE64

This closes #6579.
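
As a sketch of the behavior this pins down (the expected values below match the new
test cases in this commit; running them as one combined SELECT is only illustrative):

    SELECT
      HEX('你好'),              -- 'E4BDA0E5A5BD'
      TO_BASE64('你好'),        -- '5L2g5aW9'
      FROM_BASE64('5L2g5aW9')   -- '你好'

Without an explicit charset, the results would depend on the JVM's default encoding.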
---
 .../table/runtime/functions/ScalarFunctions.scala   | 10 +++---
 .../table/expressions/ScalarFunctionsTest.scala | 21 +
 2 files changed, 28 insertions(+), 3 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/functions/ScalarFunctions.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/functions/ScalarFunctions.scala
index 1aadf31..fa6f020 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/functions/ScalarFunctions.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/functions/ScalarFunctions.scala
@@ -19,6 +19,7 @@ package org.apache.flink.table.runtime.functions
 
 import java.lang.{StringBuilder, Long => JLong}
 import java.math.{BigDecimal => JBigDecimal}
+import java.nio.charset.StandardCharsets
 
 import org.apache.commons.codec.binary.{Base64, Hex}
 import org.apache.commons.lang3.StringUtils
@@ -207,12 +208,14 @@ object ScalarFunctions {
   /**
 * Returns the base string decoded with base64.
 */
-  def fromBase64(str: String): String = new String(Base64.decodeBase64(str))
+  def fromBase64(str: String): String =
+new String(Base64.decodeBase64(str), StandardCharsets.UTF_8)
 
   /**
 * Returns the base64-encoded result of the input string.
 */
-  def toBase64(base: String): String = 
Base64.encodeBase64String(base.getBytes())
+  def toBase64(base: String): String =
+Base64.encodeBase64String(base.getBytes(StandardCharsets.UTF_8))
 
   /**
 * Returns the hex string of a long argument.
@@ -222,7 +225,8 @@ object ScalarFunctions {
   /**
 * Returns the hex string of a string argument.
 */
-  def hex(x: String): String = Hex.encodeHexString(x.getBytes).toUpperCase()
+  def hex(x: String): String =
+Hex.encodeHexString(x.getBytes(StandardCharsets.UTF_8)).toUpperCase()
 
   /**
 * Returns an UUID string using Java utilities.
diff --git 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarFunctionsTest.scala
 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarFunctionsTest.scala
index 5038254..fbd9b02 100644
--- 
a/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarFunctionsTest.scala
+++ 
b/flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/ScalarFunctionsTest.scala
@@ -483,6 +483,13 @@ class ScalarFunctionsTest extends ScalarTypesTestBase {
   "f24.hex()",
   "HEX(f24)",
   "2A5F546869732069732061207465737420537472696E672E")
+
+testAllApis(
+  "你好".hex(),
+  "'你好'.hex()",
+  "HEX('你好')",
+  "E4BDA0E5A5BD"
+)
   }
 
   @Test
@@ -563,6 +570,13 @@ class ScalarFunctionsTest extends ScalarTypesTestBase {
   "f33.fromBase64()",
   "FROM_BASE64(f33)",
   "null")
+
+testAllApis(
+  "5L2g5aW9".fromBase64(),
+  "'5L2g5aW9'.fromBase64()",
+  "FROM_BASE64('5L2g5aW9')",
+  "你好"
+)
   }
 
   @Test
@@ -591,6 +605,13 @@ class ScalarFunctionsTest extends ScalarTypesTestBase {
   "f33.toBase64()",
   "TO_BASE64(f33)",
   "null")
+
+testAllApis(
+  "你好".toBase64(),
+  "'你好'.toBase64()",
+  "TO_BASE64('你好')",
+  "5L2g5aW9"
+)
   }
 
   @Test



[flink] branch master updated: [FLINK-10170] [table] Add string representation for all Table & SQL API types

2018-09-11 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 1ae5983  [FLINK-10170] [table] Add string representation for all Table 
& SQL API types
1ae5983 is described below

commit 1ae5983bc2b267ed7725338ef932505477aee7b8
Author: jerryjzhang 
AuthorDate: Sun Aug 19 22:27:01 2018 +0800

[FLINK-10170] [table] Add string representation for all Table & SQL API 
types

Since 1.6 the recommended way of creating source/sink tables is using
connector/format/schema descriptors. This commit adds string-based
representation for all types supported by the Table & SQL API.

We use a syntax similar to Hive and other SQL projects.

This closes #6578.
---
 docs/dev/table/connect.md  | 21 +++--
 docs/dev/table/sqlClient.md|  2 +-
 .../test-scripts/test_sql_client.sh|  2 +-
 .../apache/flink/table/descriptors/JsonTest.java   |  2 +-
 .../flink/table/typeutils/TypeStringUtils.scala| 98 --
 .../apache/flink/table/descriptors/CsvTest.scala   |  4 +-
 .../table/descriptors/TableDescriptorTest.scala| 30 ++-
 .../table/typeutils/TypeStringUtilsTest.scala  | 42 --
 8 files changed, 152 insertions(+), 49 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
index 1bfff42..16649e5 100644
--- a/docs/dev/table/connect.md
+++ b/docs/dev/table/connect.md
@@ -417,13 +417,18 @@ DECIMAL
 DATE
 TIME
 TIMESTAMP
-ROW(fieldtype, ...)  # unnamed row; e.g. ROW(VARCHAR, INT) that is 
mapped to Flink's RowTypeInfo
- # with indexed fields names f0, f1, ...
-ROW(fieldname fieldtype, ...)# named row; e.g., ROW(myField VARCHAR, 
myOtherField INT) that
- # is mapped to Flink's RowTypeInfo
-POJO(class)  # e.g., POJO(org.mycompany.MyPojoClass) that 
is mapped to Flink's PojoTypeInfo
-ANY(class)   # e.g., ANY(org.mycompany.MyClass) that is 
mapped to Flink's GenericTypeInfo
-ANY(class, serialized)   # used for type information that is not 
supported by Flink's Table & SQL API
+MAP<fieldtype, fieldtype>        # generic map; e.g. MAP<VARCHAR, INT> that is mapped to Flink's MapTypeInfo
+MULTISET<fieldtype>              # multiset; e.g. MULTISET<VARCHAR> that is mapped to Flink's MultisetTypeInfo
+PRIMITIVE_ARRAY<fieldtype>       # primitive array; e.g. PRIMITIVE_ARRAY<INT> that is mapped to Flink's PrimitiveArrayTypeInfo
+OBJECT_ARRAY<fieldtype>          # object array; e.g. OBJECT_ARRAY<POJO(org.mycompany.MyPojoClass)> that is mapped to
+                                 #   Flink's ObjectArrayTypeInfo
+ROW<fieldtype, ...>              # unnamed row; e.g. ROW<VARCHAR, INT> that is mapped to Flink's RowTypeInfo
+                                 #   with indexed fields names f0, f1, ...
+ROW<fieldname fieldtype, ...>    # named row; e.g., ROW<myField VARCHAR, myOtherField INT> that
+                                 #   is mapped to Flink's RowTypeInfo
+POJO<class>                      # e.g., POJO<org.mycompany.MyPojoClass> that is mapped to Flink's PojoTypeInfo
+ANY<class>                       # e.g., ANY<org.mycompany.MyClass> that is mapped to Flink's GenericTypeInfo
+ANY<class, serialized>           # used for type information that is not supported by Flink's Table & SQL API
 {% endhighlight %}
 
 {% top %}
@@ -1046,4 +1051,4 @@ table.writeToSink(sink)
 
 
 
-{% top %}
\ No newline at end of file
+{% top %}
diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 8c4ba83..296d638 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -302,7 +302,7 @@ tables:
 format:
   property-version: 1
   type: json
-  schema: "ROW(rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP)"
+  schema: "ROW<rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP>"
 schema:
   - name: rideId
 type: LONG
diff --git a/flink-end-to-end-tests/test-scripts/test_sql_client.sh 
b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
index 934f7d4..b583072 100755
--- a/flink-end-to-end-tests/test-scripts/test_sql_client.sh
+++ b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
@@ -128,7 +128,7 @@ tables:
   - name: user
 type: VARCHAR
   - name: event
-type: ROW(type VARCHAR, message VARCHAR)
+type: ROW<type VARCHAR, message VARCHAR>
 connector:
   type: kafka
   version: "0.10"
diff --git 
a/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
 
b/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
index 6e370a0..ac6ff11 100644
--- 
a/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
+++ 
b/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
@@ -106,7 +106,7 @@ public class JsonTest extends DescriptorTestBase {
final Map props3 = new 

[flink] branch release-1.6 updated: [FLINK-10170] [table] Add string representation for all Table & SQL API types

2018-09-11 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 636a526  [FLINK-10170] [table] Add string representation for all Table 
& SQL API types
636a526 is described below

commit 636a526ab2ce5e2f2b00597d53c2455b6e53bfa8
Author: jerryjzhang 
AuthorDate: Sun Aug 19 22:27:01 2018 +0800

[FLINK-10170] [table] Add string representation for all Table & SQL API 
types

Since 1.6 the recommended way of creating source/sink tables is using
connector/format/schema descriptors. This commit adds string-based
representation for all types supported by the Table & SQL API.

We use a syntax similar to Hive and other SQL projects.

This closes #6578.
---
 docs/dev/table/sqlClient.md|  2 +-
 .../test-scripts/test_sql_client.sh|  2 +-
 .../apache/flink/table/descriptors/JsonTest.java   |  2 +-
 .../flink/table/typeutils/TypeStringUtils.scala| 98 --
 .../apache/flink/table/descriptors/CsvTest.scala   |  4 +-
 .../table/descriptors/TableDescriptorTest.scala| 30 ++-
 .../table/typeutils/TypeStringUtilsTest.scala  | 42 --
 7 files changed, 139 insertions(+), 41 deletions(-)

diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index ed36354..c88ed38 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -264,7 +264,7 @@ tables:
 format:
   property-version: 1
   type: json
-  schema: "ROW(rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP)"
+  schema: "ROW<rideId LONG, lon FLOAT, lat FLOAT, rideTime TIMESTAMP>"
 schema:
   - name: rideId
 type: LONG
diff --git a/flink-end-to-end-tests/test-scripts/test_sql_client.sh 
b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
index 934f7d4..b583072 100755
--- a/flink-end-to-end-tests/test-scripts/test_sql_client.sh
+++ b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
@@ -128,7 +128,7 @@ tables:
   - name: user
 type: VARCHAR
   - name: event
-type: ROW(type VARCHAR, message VARCHAR)
+type: ROW<type VARCHAR, message VARCHAR>
 connector:
   type: kafka
   version: "0.10"
diff --git 
a/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
 
b/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
index 6e370a0..ac6ff11 100644
--- 
a/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
+++ 
b/flink-formats/flink-json/src/test/java/org/apache/flink/table/descriptors/JsonTest.java
@@ -106,7 +106,7 @@ public class JsonTest extends DescriptorTestBase {
final Map<String, String> props3 = new HashMap<>();
props3.put("format.type", "json");
props3.put("format.property-version", "1");
-   props3.put("format.schema", "ROW(test1 VARCHAR, test2 
TIMESTAMP)");
+   props3.put("format.schema", "ROW<test1 VARCHAR, test2 TIMESTAMP>");
props3.put("format.fail-on-missing-field", "true");
 
final Map props4 = new HashMap<>();
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/typeutils/TypeStringUtils.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/typeutils/TypeStringUtils.scala
index afc6506..9e5f075 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/typeutils/TypeStringUtils.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/typeutils/TypeStringUtils.scala
@@ -23,7 +23,7 @@ import java.io.Serializable
 import org.apache.commons.codec.binary.Base64
 import org.apache.commons.lang3.StringEscapeUtils
 import org.apache.flink.api.common.functions.InvalidTypesException
-import org.apache.flink.api.common.typeinfo.{BasicArrayTypeInfo, 
PrimitiveArrayTypeInfo, TypeInformation}
+import org.apache.flink.api.common.typeinfo.{PrimitiveArrayTypeInfo, 
TypeInformation}
 import org.apache.flink.api.common.typeutils.CompositeType
 import org.apache.flink.api.java.typeutils._
 import org.apache.flink.table.api.{TableException, Types, ValidationException}
@@ -67,6 +67,10 @@ object TypeStringUtils extends JavaTokenParsers with 
PackratParsers {
   lazy val ROW: Keyword = Keyword("ROW")
   lazy val ANY: Keyword = Keyword("ANY")
   lazy val POJO: Keyword = Keyword("POJO")
+  lazy val MAP: Keyword = Keyword("MAP")
+  lazy val MULTISET: Keyword = Keyword("MULTISET")
+  lazy val PRIMITIVE_ARRAY: Keyword = Keyword("PRIMITIVE_ARRAY")
+  lazy val OBJECT_ARRAY: Keyword = Keyword("OBJECT_ARRAY")
 
   lazy val qualifiedName: Parser[String] =
 """\p{javaJa

[flink] branch master updated: [FLINK-10281] [table] Fix string literal escaping throughout Table & SQL API

2018-09-12 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c87b943  [FLINK-10281] [table] Fix string literal escaping throughout 
Table & SQL API
c87b943 is described below

commit c87b9433e3225602e56497f782a610668eac8bf9
Author: Timo Walther 
AuthorDate: Fri Sep 7 13:32:05 2018 +0200

[FLINK-10281] [table] Fix string literal escaping throughout Table & SQL API

This commit fixes the string literal escaping of the Table & SQL API. Proper
escaping of quotes was not possible in the past for Table API. SQL and SQL 
Client
were not standard compliant.

Due to FLINK-8301, backslashes were treated as escape characters in SQL
literals; however, they should only be used in SQL `U&'\1234'` literals. For
the Table API, the new logic relies on the Java/Scala escaping and uses
duplicate quotes for escaping the quotes in expression strings. For SQL, we
rely on unicode string literals with or without the UESCAPE clause. The SQL
Client was using backslashes for escaping new lines. For the SQL Client, we
now allow unescaped new lines and use ';' for statement finalization, similar
to other SQL clients.

This closes #6671.
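
In SQL terms, the rules described above boil down to statements such as the
following (the literals are the same examples the updated documentation below
uses; the trailing semicolons reflect the new SQL Client statement finalization):

    SELECT 'It''s me.';             -- escape a quote by duplicating it
    SELECT U&'\263A';               -- unicode escape with the default backslash
    SELECT U&'#263A' UESCAPE '#';   -- unicode escape with a custom character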
---
 docs/dev/table/sql.md  |  5 ++
 docs/dev/table/sqlClient.md| 12 +--
 docs/dev/table/tableApi.md |  7 +-
 .../apache/flink/table/client/cli/CliClient.java   | 19 +++--
 .../apache/flink/table/client/cli/CliStrings.java  | 10 +--
 .../flink/table/client/cli/SqlMultiLineParser.java | 52 +
 .../apache/flink/table/codegen/CodeGenerator.scala |  4 +-
 .../flink/table/codegen/ExpressionReducer.scala|  7 +-
 .../flink/table/expressions/ExpressionParser.scala | 19 +++--
 .../apache/flink/table/expressions/literals.scala  |  5 --
 .../flink/table/expressions/LiteralTest.scala  | 87 +-
 .../flink/table/runtime/batch/sql/CalcITCase.scala | 25 ---
 .../table/runtime/batch/table/CalcITCase.scala | 30 
 .../flink/table/runtime/stream/sql/SqlITCase.scala | 39 --
 .../table/runtime/stream/table/CalcITCase.scala| 28 ---
 15 files changed, 215 insertions(+), 134 deletions(-)

diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index 604e989..a9fd94f 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -204,6 +204,11 @@ Flink SQL uses a lexical policy for identifier (table, 
attribute, function names
 - After which, identifiers are matched case-sensitively.
 - Unlike Java, back-ticks allow identifiers to contain non-alphanumeric 
characters (e.g. "SELECT a AS `my field` FROM t").
 
+String literals must be enclosed in single quotes (e.g., `SELECT 'Hello 
World'`). Duplicate a single quote for escaping (e.g., `SELECT 'It''s me.'`). 
Unicode characters are supported in string literals. If explicit unicode code 
points are required, use the following syntax:
+
+- Use the backslash (`\`) as escaping character (default): `SELECT U&'\263A'`
+- Use a custom escaping character: `SELECT U&'#263A' UESCAPE '#'`
+
 {% top %}
 
 Operations
diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 296d638..5224842 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -61,7 +61,7 @@ By default, the SQL Client will read its configuration from 
the environment file
 Once the CLI has been started, you can use the `HELP` command to list all 
available SQL statements. For validating your setup and cluster connection, you 
can enter your first SQL query and press the `Enter` key to execute it:
 
 {% highlight sql %}
-SELECT 'Hello World'
+SELECT 'Hello World';
 {% endhighlight %}
 
 This query requires no table source and produces a single row result. The CLI 
will retrieve results from the cluster and visualize them. You can close the 
result view by pressing the `Q` key.
@@ -71,19 +71,19 @@ The CLI supports **two modes** for maintaining and 
visualizing results.
 The **table mode** materializes results in memory and visualizes them in a 
regular, paginated table representation. It can be enabled by executing the 
following command in the CLI:
 
 {% highlight text %}
-SET execution.result-mode=table
+SET execution.result-mode=table;
 {% endhighlight %}
 
 The **changelog mode** does not materialize results and visualizes the result 
stream that is produced by a [continuous 
query](streaming.html#dynamic-tables--continuous-queries) consisting of 
insertions (`+`) and retractions (`-`).
 
 {% highlight text %}
-SET execution.result-mode=changelog
+SET execution.result-mode=changelog;
 {% endhighlight %}
 
 You can use the following query to see both result modes in action:

[flink] branch master updated: [FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest binary incompatibility

2018-09-13 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c4beb3a  [FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest 
binary incompatibility
c4beb3a is described below

commit c4beb3aefa806d1b14ed4d388177935578203bf0
Author: Timo Walther 
AuthorDate: Wed Sep 12 12:21:34 2018 +0200

[FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest binary 
incompatibility

This commit fixes the binary incompatibility for UpdateRequests in
Elasticsearch. It is caused by a binary compatibility issue between the base
module (which is compiled against a very old ES version) and the current
Elasticsearch version. The fix lets the API call bridge also provide a
version-specific RequestIndexer.

This closes #6682.
---
 .../elasticsearch/ElasticsearchApiCallBridge.java  | 14 
 .../elasticsearch/ElasticsearchSinkBase.java   |  4 +-
 .../PreElasticsearch6BulkProcessorIndexer.java | 84 +
 .../Elasticsearch6ApiCallBridge.java   | 13 
 .../Elasticsearch6BulkProcessorIndexer.java| 85 ++
 .../streaming/tests/Elasticsearch1SinkExample.java | 42 ---
 .../streaming/tests/Elasticsearch2SinkExample.java | 43 ---
 .../streaming/tests/Elasticsearch5SinkExample.java | 42 ---
 .../streaming/tests/Elasticsearch6SinkExample.java | 35 +++--
 .../test-scripts/test_streaming_elasticsearch.sh   |  3 +-
 10 files changed, 319 insertions(+), 46 deletions(-)

diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index f1dcc83..d3b774c 100644
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -28,6 +28,7 @@ import javax.annotation.Nullable;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * An {@link ElasticsearchApiCallBridge} is used to bridge incompatible 
Elasticsearch Java API calls across different versions.
@@ -80,6 +81,19 @@ public interface ElasticsearchApiCallBridge extends Ser
@Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy 
flushBackoffPolicy);
 
/**
+* Creates a {@link RequestIndexer} that is able to work with {@link 
BulkProcessor} binary compatible.
+*/
+   default RequestIndexer createBulkProcessorIndexer(
+   BulkProcessor bulkProcessor,
+   boolean flushOnCheckpoint,
+   AtomicLong numPendingRequestsRef) {
+   return new PreElasticsearch6BulkProcessorIndexer(
+   bulkProcessor,
+   flushOnCheckpoint,
+   numPendingRequestsRef);
+   }
+
+   /**
 * Perform any necessary state cleanup.
 */
default void cleanup() {
diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index 7dac06c..4d0c002 100644
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -164,7 +164,7 @@ public abstract class ElasticsearchSinkBase extends
private boolean flushOnCheckpoint = true;
 
/** Provided to the user via the {@link ElasticsearchSinkFunction} to 
add {@link ActionRequest ActionRequests}. */
-   private transient BulkProcessorIndexer requestIndexer;
+   private transient RequestIndexer requestIndexer;
 
// 

//  Internals for the Flink Elasticsearch Sink
@@ -295,7 +295,7 @@ public abstract class ElasticsearchSinkBase extends
public void open(Configuration parameters) throws Exception {
client = callBridge.createClient(userConfig);
bulkProcessor = buildBulkProcessor(new BulkProcessorListener());
-   requestIndexer = new BulkProcessorIndexer

[flink] branch release-1.6 updated: [FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest binary incompatibility

2018-09-13 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new f3d6fac  [FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest 
binary incompatibility
f3d6fac is described below

commit f3d6fac22ff160b53052d384d8d0c231557fcf3e
Author: Timo Walther 
AuthorDate: Wed Sep 12 12:21:34 2018 +0200

[FLINK-10269] [connectors] Fix Elasticsearch 6 UpdateRequest binary 
incompatibility

This commit fixes the binary incompatibility for UpdateRequests in
Elasticsearch. It is caused by a binary compatibility issue between the base
module (which is compiled against a very old ES version) and the current
Elasticsearch version. The fix lets the API call bridge also provide a
version-specific RequestIndexer.

This closes #6682.
---
 .../elasticsearch/ElasticsearchApiCallBridge.java  | 14 
 .../elasticsearch/ElasticsearchSinkBase.java   |  4 +-
 .../PreElasticsearch6BulkProcessorIndexer.java | 84 +
 .../Elasticsearch6ApiCallBridge.java   | 13 
 .../Elasticsearch6BulkProcessorIndexer.java| 85 ++
 .../streaming/tests/Elasticsearch1SinkExample.java | 42 ---
 .../streaming/tests/Elasticsearch2SinkExample.java | 43 ---
 .../streaming/tests/Elasticsearch5SinkExample.java | 42 ---
 .../streaming/tests/Elasticsearch6SinkExample.java | 35 +++--
 .../test-scripts/test_streaming_elasticsearch.sh   |  3 +-
 10 files changed, 319 insertions(+), 46 deletions(-)

diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
index f1dcc83..d3b774c 100644
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
+++ 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchApiCallBridge.java
@@ -28,6 +28,7 @@ import javax.annotation.Nullable;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * An {@link ElasticsearchApiCallBridge} is used to bridge incompatible 
Elasticsearch Java API calls across different versions.
@@ -80,6 +81,19 @@ public interface ElasticsearchApiCallBridge extends Ser
@Nullable ElasticsearchSinkBase.BulkFlushBackoffPolicy 
flushBackoffPolicy);
 
/**
+* Creates a {@link RequestIndexer} that is able to work with {@link 
BulkProcessor} binary compatible.
+*/
+   default RequestIndexer createBulkProcessorIndexer(
+   BulkProcessor bulkProcessor,
+   boolean flushOnCheckpoint,
+   AtomicLong numPendingRequestsRef) {
+   return new PreElasticsearch6BulkProcessorIndexer(
+   bulkProcessor,
+   flushOnCheckpoint,
+   numPendingRequestsRef);
+   }
+
+   /**
 * Perform any necessary state cleanup.
 */
default void cleanup() {
diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
index 7dac06c..4d0c002 100644
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
+++ 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBase.java
@@ -164,7 +164,7 @@ public abstract class ElasticsearchSinkBase extends
private boolean flushOnCheckpoint = true;
 
/** Provided to the user via the {@link ElasticsearchSinkFunction} to 
add {@link ActionRequest ActionRequests}. */
-   private transient BulkProcessorIndexer requestIndexer;
+   private transient RequestIndexer requestIndexer;
 
// 

//  Internals for the Flink Elasticsearch Sink
@@ -295,7 +295,7 @@ public abstract class ElasticsearchSinkBase extends
public void open(Configuration parameters) throws Exception {
client = callBridge.createClient(userConfig);
bulkProcessor = buildBulkProcessor(new BulkProcessorListener());
-   requestIndexer = new

[flink] branch master updated: [FLINK-6847] [FLINK-6813] [table] Add support for TIMESTAMPDIFF in Table API & SQL

2018-09-25 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c5ce970  [FLINK-6847] [FLINK-6813] [table] Add support for 
TIMESTAMPDIFF in Table API & SQL
c5ce970 is described below

commit c5ce970e781df60eb27b62446853eaa0579c8706
Author: xueyu <278006...@qq.com>
AuthorDate: Sun Jul 8 18:50:06 2018 +0800

[FLINK-6847] [FLINK-6813] [table] Add support for TIMESTAMPDIFF in Table 
API & SQL

This closes #6282.
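
A short sketch of the new function, mirroring the examples that the documentation
changes below introduce:

    SELECT
      TIMESTAMPDIFF(DAY, TIMESTAMP '2003-01-02 10:00:00', TIMESTAMP '2003-01-03 10:00:00'),  -- 1
      TIMESTAMPADD(WEEK, 1, DATE '2003-01-02')                                               -- 2003-01-09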
---
 docs/dev/table/functions.md|  84 ++-
 .../flink/table/api/scala/expressionDsl.scala  |  29 +
 .../apache/flink/table/codegen/CodeGenerator.scala |   4 +-
 .../table/codegen/calls/FunctionGenerator.scala|  24 +
 .../table/codegen/calls/ScalarOperators.scala  |  34 ++
 .../table/codegen/calls/TimestampDiffCallGen.scala | 118 +
 .../flink/table/expressions/ExpressionParser.scala |   7 ++
 .../flink/table/expressions/arithmetic.scala   |  21 ++--
 .../org/apache/flink/table/expressions/time.scala  |  54 ++
 .../flink/table/validate/FunctionCatalog.scala |   2 +
 .../table/expressions/ScalarFunctionsTest.scala| 115 
 .../validation/ScalarFunctionsValidationTest.scala |  15 ++-
 12 files changed, 494 insertions(+), 13 deletions(-)

diff --git a/docs/dev/table/functions.md b/docs/dev/table/functions.md
index 85768ab..1108294 100644
--- a/docs/dev/table/functions.md
+++ b/docs/dev/table/functions.md
@@ -3311,14 +3311,27 @@ DATE_FORMAT(timestamp, string)
 
   
 {% highlight text %}
-TIMESTAMPADD(unit, interval, timevalue)
+TIMESTAMPADD(timeintervalunit, interval, timepoint)
 {% endhighlight %}
   
   
-Returns a new time value that adds a (signed) integer interval to 
timevalue. The unit for interval is given by the unit argument, 
which should be one of the following values: SECOND, 
MINUTE, HOUR, DAY, WEEK, 
MONTH, QUARTER, or YEAR. 
+Returns a new time value that adds a (signed) integer interval to 
timepoint. The unit for interval is given by the unit argument, 
which should be one of the following values: SECOND, 
MINUTE, HOUR, DAY, WEEK, 
MONTH, QUARTER, or YEAR. 
 E.g., TIMESTAMPADD(WEEK, 1, DATE '2003-01-02') returns 
2003-01-09.
   
 
+
+
+  
+{% highlight text %}
+TIMESTAMPDIFF(timepointunit, timepoint1, timepoint2)
+{% endhighlight %}
+  
+  
+Returns the (signed) number of timepointunit between 
timepoint1 and timepoint2. The unit for the interval is given by 
the first argument, which should be one of the following values: 
SECOND, MINUTE, HOUR, DAY, 
MONTH, or YEAR. See also the Time Interval and Point Unit 
Specifiers table.
+E.g., TIMESTAMPDIFF(DAY, TIMESTAMP '2003-01-02 10:00:00', 
TIMESTAMP '2003-01-03 10:00:00') leads to 1.
+  
+
+
   
 
 
@@ -3564,6 +3577,19 @@ dateFormat(TIMESTAMP, STRING)
 E.g., dateFormat(ts, '%Y, %d %M') results in strings 
formatted as "2017, 05 May".
   
 
+
+
+  
+{% highlight java %}
+timestampDiff(TIMEPOINTUNIT, TIMEPOINT1, TIMEPOINT2)
+{% endhighlight %}
+  
+  
+Returns the (signed) number of TIMEPOINTUNIT between 
TIMEPOINT1 and TIMEPOINT2. The unit for the interval is given by 
the first argument, which should be one of the following values: 
SECOND, MINUTE, HOUR, DAY, 
MONTH, or YEAR. See also the Time Interval and Point Unit 
Specifiers table.
+E.g., timestampDiff(DAY, '2003-01-02 10:00:00'.toTimestamp, 
'2003-01-03 10:00:00'.toTimestamp) leads to 1.
+  
+
+
 
 
 
@@ -3809,6 +3835,19 @@ dateFormat(TIMESTAMP, STRING)
 E.g., dateFormat('ts, "%Y, %d %M") results in strings 
formatted as "2017, 05 May".
   
 
+
+
+  
+{% highlight scala %}
+timestampDiff(TIMEPOINTUNIT, TIMEPOINT1, TIMEPOINT2)
+{% endhighlight %}
+  
+  
+Returns the (signed) number of TIMEPOINTUNIT between 
TIMEPOINT1 and TIMEPOINT2. The unit for the interval is given by 
the first argument, which should be one of the following values: 
SECOND, MINUTE, HOUR, DAY, 
MONTH, or YEAR. See also the Time Interval and Point Unit 
Specifiers table.
+E.g., timestampDiff(TimePointUnit.DAY, '2003-01-02 
10:00:00'.toTimestamp, '2003-01-03 10:00:00'.toTimestamp) leads to 
1.
+  
+
+
   
 
 
@@ -5463,3 +5502,44 @@ The following table lists specifiers for date format 
functions.
 
 
 {% top %}
+
+Time Interval and Point Unit Specifiers
+---
+
+The following table lists specifiers for time interval and time point units. 
+
+For Table API, please use `_` for spaces (e.g., `DAY_TO_HOUR`).
+
+| Time In

[flink] branch master updated: [FLINK-10263] [sql-client] Fix classloader issues in SQL Client

2018-09-25 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 116347e  [FLINK-10263] [sql-client] Fix classloader issues in SQL 
Client
116347e is described below

commit 116347ea179bdcf0a8eb33581c744143374057a0
Author: Timo Walther 
AuthorDate: Thu Sep 20 10:28:22 2018 +0200

[FLINK-10263] [sql-client] Fix classloader issues in SQL Client

Fixes classloading issues when using a UDF with constant parameters. Every
optimization might need to compile code (e.g., for constant folding) and thus
needs access to the user-code classloader.

This closes #6725.
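
Roughly, the scenario exercised by the adjusted end-to-end test below is a UDF call
with only constant arguments (RegReplace is a user-defined function registered via
the environment file):

    -- the planner folds the constant call at optimization time, which requires the
    -- user-code classloader to be installed on the optimizing thread
    SELECT AvroBothTable.*, RegReplace('Test constant folding.', 'Test', 'Success') AS constant
    FROM AvroBothTable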
---
 .../test-scripts/test_sql_client.sh|  8 +--
 .../client/gateway/local/ExecutionContext.java | 14 +++
 .../table/client/gateway/local/LocalExecutor.java  | 28 +++---
 .../flink/table/codegen/ExpressionReducer.scala|  6 +++--
 4 files changed, 43 insertions(+), 13 deletions(-)

diff --git a/flink-end-to-end-tests/test-scripts/test_sql_client.sh 
b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
index b583072..ca02513 100755
--- a/flink-end-to-end-tests/test-scripts/test_sql_client.sh
+++ b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
@@ -212,6 +212,8 @@ tables:
 type: VARCHAR
   - name: duplicate_count
 type: BIGINT
+  - name: constant
+type: VARCHAR
 connector:
   type: filesystem
   path: $RESULT
@@ -226,6 +228,8 @@ tables:
   type: VARCHAR
 - name: duplicate_count
   type: BIGINT
+- name: constant
+  type: VARCHAR
 
 functions:
   - name: RegReplace
@@ -261,7 +265,7 @@ $FLINK_DIR/bin/sql-client.sh embedded \
 
 read -r -d '' SQL_STATEMENT_2 << EOF
 INSERT INTO CsvSinkTable
-  SELECT *
+  SELECT AvroBothTable.*, RegReplace('Test constant folding.', 'Test', 
'Success') AS constant
   FROM AvroBothTable
 EOF
 
@@ -285,4 +289,4 @@ for i in {1..10}; do
   sleep 5
 done
 
-check_result_hash "SQLClient" $RESULT "dca08a82cc09f6b19950291dbbef16bb"
+check_result_hash "SQLClient" $RESULT "0a1bf8bf716069b7269f575f87a802c0"
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
index 85b3e92..552d0b3 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
@@ -75,6 +75,7 @@ import java.net.URL;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Supplier;
 
 /**
  * Context for executing table programs. This class caches everything that can 
be cached across
@@ -183,6 +184,19 @@ public class ExecutionContext {
return tableSinks;
}
 
+   /**
+* Executes the given supplier using the execution context's 
classloader as thread classloader.
+*/
+   public  R wrapClassLoader(Supplier supplier) {
+   final ClassLoader previousClassloader = 
Thread.currentThread().getContextClassLoader();
+   Thread.currentThread().setContextClassLoader(classLoader);
+   try {
+   return supplier.get();
+   } finally {
+   
Thread.currentThread().setContextClassLoader(previousClassloader);
+   }
+   }
+
// 

 
private static CommandLine createCommandLine(Deployment deployment, 
Options commandLineOptions) {
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
index 3b9e8e9..1318043 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
@@ -219,14 +219,16 @@ public class LocalExecutor implements Executor {
 
@Override
public String explainStatement(SessionContext session, String 
statement) throws SqlExecutionException {
-   final TableEnvironment tableEnv = 
getOrCreateExecutionContext(session)
+   final ExecutionContext context = 
getOrCreateExecutionContext(session);
+   final TableEnvironment tableEnv = context
  

[flink] branch release-1.6 updated: [FLINK-10263] [sql-client] Fix classloader issues in SQL Client

2018-09-26 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 4acb904  [FLINK-10263] [sql-client] Fix classloader issues in SQL 
Client
4acb904 is described below

commit 4acb90474d18427e45ee0a274fd88cf963d9220c
Author: Timo Walther 
AuthorDate: Thu Sep 20 10:28:22 2018 +0200

[FLINK-10263] [sql-client] Fix classloader issues in SQL Client

Fixes classloading issues when using a UDF with constant parameters. Every
optimization might need to compile code (e.g., for constant folding) and thus
needs access to the user-code classloader.

This closes #6725.
---
 .../test-scripts/test_sql_client.sh|  8 +--
 .../client/gateway/local/ExecutionContext.java | 14 +++
 .../table/client/gateway/local/LocalExecutor.java  | 28 +++---
 .../flink/table/codegen/ExpressionReducer.scala|  5 +++-
 4 files changed, 43 insertions(+), 12 deletions(-)

diff --git a/flink-end-to-end-tests/test-scripts/test_sql_client.sh 
b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
index b583072..ca02513 100755
--- a/flink-end-to-end-tests/test-scripts/test_sql_client.sh
+++ b/flink-end-to-end-tests/test-scripts/test_sql_client.sh
@@ -212,6 +212,8 @@ tables:
 type: VARCHAR
   - name: duplicate_count
 type: BIGINT
+  - name: constant
+type: VARCHAR
 connector:
   type: filesystem
   path: $RESULT
@@ -226,6 +228,8 @@ tables:
   type: VARCHAR
 - name: duplicate_count
   type: BIGINT
+- name: constant
+  type: VARCHAR
 
 functions:
   - name: RegReplace
@@ -261,7 +265,7 @@ $FLINK_DIR/bin/sql-client.sh embedded \
 
 read -r -d '' SQL_STATEMENT_2 << EOF
 INSERT INTO CsvSinkTable
-  SELECT *
+  SELECT AvroBothTable.*, RegReplace('Test constant folding.', 'Test', 
'Success') AS constant
   FROM AvroBothTable
 EOF
 
@@ -285,4 +289,4 @@ for i in {1..10}; do
   sleep 5
 done
 
-check_result_hash "SQLClient" $RESULT "dca08a82cc09f6b19950291dbbef16bb"
+check_result_hash "SQLClient" $RESULT "0a1bf8bf716069b7269f575f87a802c0"
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
index 4283953..4361c37 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ExecutionContext.java
@@ -74,6 +74,7 @@ import java.net.URL;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Supplier;
 
 /**
  * Context for executing table programs. This class caches everything that can 
be cached across
@@ -182,6 +183,19 @@ public class ExecutionContext {
return tableSinks;
}
 
+   /**
+* Executes the given supplier using the execution context's 
classloader as thread classloader.
+*/
+   public  R wrapClassLoader(Supplier supplier) {
+   final ClassLoader previousClassloader = 
Thread.currentThread().getContextClassLoader();
+   Thread.currentThread().setContextClassLoader(classLoader);
+   try {
+   return supplier.get();
+   } finally {
+   
Thread.currentThread().setContextClassLoader(previousClassloader);
+   }
+   }
+
// 

 
private static CommandLine createCommandLine(Deployment deployment, 
Options commandLineOptions) {
diff --git 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
index b2e8271..c44e3ae 100644
--- 
a/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
+++ 
b/flink-libraries/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
@@ -219,14 +219,16 @@ public class LocalExecutor implements Executor {
 
@Override
public String explainStatement(SessionContext session, String 
statement) throws SqlExecutionException {
-   final TableEnvironment tableEnv = 
getOrCreateExecutionContext(session)
+   final ExecutionContext context = 
getOrCreateExecutionContext(session);
+   final TableEnvironment tableEnv = context
  

[flink] branch master updated: [hotfix] [docs] Add double quotes to Kafka version YAML examples

2018-09-26 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 7fadbeb  [hotfix] [docs] Add double quotes to Kafka version YAML 
examples
7fadbeb is described below

commit 7fadbeb3c7b5ca295c870d8658492c7b1024b53c
Author: yangshimin 
AuthorDate: Thu Aug 30 13:47:59 2018 +0800

[hotfix] [docs] Add double quotes to Kafka version YAML examples

This closes #6639.
---
 docs/dev/table/connect.md   | 2 +-
 docs/dev/table/sqlClient.md | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
index 16649e5..f79dd93 100644
--- a/docs/dev/table/connect.md
+++ b/docs/dev/table/connect.md
@@ -548,7 +548,7 @@ The Kafka connector allows for reading and writing from and 
to an Apache Kafka t
 {% highlight yaml %}
 connector:
   type: kafka
-  version: 0.11   # required: valid connector versions are "0.8", "0.9", 
"0.10", and "0.11"
+  version: "0.11" # required: valid connector versions are "0.8", "0.9", 
"0.10", and "0.11"
   topic: ...  # required: topic name from which the table is read
 
   properties: # optional: connector specific properties
diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index 5224842..a2a7dd0 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -289,7 +289,7 @@ tables:
 connector:
   property-version: 1
   type: kafka
-  version: 0.11
+  version: "0.11"
   topic: TaxiRides
   startup-mode: earliest-offset
   properties:
@@ -432,7 +432,7 @@ tables:
 connector:
   property-version: 1
   type: kafka
-  version: 0.11
+  version: "0.11"
   topic: OutputTopic
   properties:
 - key: zookeeper.connect



[flink] branch release-1.6 updated: [hotfix] [docs] Add double quotes to Kafka version YAML examples

2018-09-26 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 6c21d68  [hotfix] [docs] Add double quotes to Kafka version YAML 
examples
6c21d68 is described below

commit 6c21d687f731b0ebab74ac47dc1d7038f39f78db
Author: yangshimin 
AuthorDate: Thu Aug 30 13:47:59 2018 +0800

[hotfix] [docs] Add double quotes to Kafka version YAML examples

This closes #6639.
---
 docs/dev/table/connect.md   | 2 +-
 docs/dev/table/sqlClient.md | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
index 1bfff42..7a9e141 100644
--- a/docs/dev/table/connect.md
+++ b/docs/dev/table/connect.md
@@ -543,7 +543,7 @@ The Kafka connector allows for reading and writing from and 
to an Apache Kafka t
 {% highlight yaml %}
 connector:
   type: kafka
-  version: 0.11   # required: valid connector versions are "0.8", "0.9", 
"0.10", and "0.11"
+  version: "0.11" # required: valid connector versions are "0.8", "0.9", 
"0.10", and "0.11"
   topic: ...  # required: topic name from which the table is read
 
   properties: # optional: connector specific properties
diff --git a/docs/dev/table/sqlClient.md b/docs/dev/table/sqlClient.md
index c88ed38..c181d24 100644
--- a/docs/dev/table/sqlClient.md
+++ b/docs/dev/table/sqlClient.md
@@ -251,7 +251,7 @@ tables:
 connector:
   property-version: 1
   type: kafka
-  version: 0.11
+  version: "0.11"
   topic: TaxiRides
   startup-mode: earliest-offset
   properties:
@@ -394,7 +394,7 @@ tables:
 connector:
   property-version: 1
   type: kafka
-  version: 0.11
+  version: "0.11"
   topic: OutputTopic
   properties:
 - key: zookeeper.connect



[flink] branch master updated: [hotfix] [connectors] Remove unused BulkProcessorIndexer class

2018-10-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new d4129c5  [hotfix] [connectors] Remove unused BulkProcessorIndexer class
d4129c5 is described below

commit d4129c574d7f30fd5dec9e7745300a16e76fb0f9
Author: Timo Walther 
AuthorDate: Mon Oct 1 11:02:05 2018 +0200

[hotfix] [connectors] Remove unused BulkProcessorIndexer class
---
 .../elasticsearch/BulkProcessorIndexer.java| 79 --
 1 file changed, 79 deletions(-)

diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
deleted file mode 100644
index 33b42cb..000
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.elasticsearch;
-
-import org.apache.flink.annotation.Internal;
-
-import org.elasticsearch.action.ActionRequest;
-import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.update.UpdateRequest;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
-
-/**
- * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}.
- * {@link ActionRequest ActionRequests} will be buffered before sending a bulk 
request to the Elasticsearch cluster.
- */
-@Internal
-class BulkProcessorIndexer implements RequestIndexer {
-
-   private final BulkProcessor bulkProcessor;
-   private final boolean flushOnCheckpoint;
-   private final AtomicLong numPendingRequestsRef;
-
-   BulkProcessorIndexer(BulkProcessor bulkProcessor, boolean 
flushOnCheckpoint, AtomicLong numPendingRequestsRef) {
-   this.bulkProcessor = checkNotNull(bulkProcessor);
-   this.flushOnCheckpoint = flushOnCheckpoint;
-   this.numPendingRequestsRef = 
checkNotNull(numPendingRequestsRef);
-   }
-
-   @Override
-   public void add(DeleteRequest... deleteRequests) {
-   for (DeleteRequest deleteRequest : deleteRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(deleteRequest);
-   }
-   }
-
-   @Override
-   public void add(IndexRequest... indexRequests) {
-   for (IndexRequest indexRequest : indexRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(indexRequest);
-   }
-   }
-
-   @Override
-   public void add(UpdateRequest... updateRequests) {
-   for (UpdateRequest updateRequest : updateRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(updateRequest);
-   }
-   }
-}

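Per its Javadoc, the removed class buffered ActionRequests through a BulkProcessor before sending them to the Elasticsearch cluster. User code never instantiates it directly; it only sees the RequestIndexer interface that an ElasticsearchSinkFunction receives in its process() method. A rough sketch of such a sink function follows; the element type, index name and document type are made up for illustration.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.HashMap;
import java.util.Map;

public class WordSinkFunction implements ElasticsearchSinkFunction<String> {

    @Override
    public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
        Map<String, String> json = new HashMap<>();
        json.put("data", element);

        // Hypothetical index and type names, only for illustration.
        IndexRequest request = Requests.indexRequest()
                .index("my-index")
                .type("my-type")
                .source(json);

        // The indexer buffers the request; the connector's BulkProcessor flushes it in bulk.
        indexer.add(request);
    }
}

The flushOnCheckpoint counting visible in the removed class is what lets the connector wait for pending requests on checkpoints and thereby provide at-least-once guarantees.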


[flink] branch release-1.6 updated: [hotfix] [connectors] Remove unused BulkProcessorIndexer class

2018-10-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 679a304  [hotfix] [connectors] Remove unused BulkProcessorIndexer class
679a304 is described below

commit 679a304cd0d6f699f81ef2919e879b15018bf3f4
Author: Timo Walther 
AuthorDate: Mon Oct 1 11:02:05 2018 +0200

[hotfix] [connectors] Remove unused BulkProcessorIndexer class
---
 .../elasticsearch/BulkProcessorIndexer.java| 79 --
 1 file changed, 79 deletions(-)

diff --git 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
 
b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
deleted file mode 100644
index 33b42cb..000
--- 
a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/BulkProcessorIndexer.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.streaming.connectors.elasticsearch;
-
-import org.apache.flink.annotation.Internal;
-
-import org.elasticsearch.action.ActionRequest;
-import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.update.UpdateRequest;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
-
-/**
- * Implementation of a {@link RequestIndexer}, using a {@link BulkProcessor}.
- * {@link ActionRequest ActionRequests} will be buffered before sending a bulk 
request to the Elasticsearch cluster.
- */
-@Internal
-class BulkProcessorIndexer implements RequestIndexer {
-
-   private final BulkProcessor bulkProcessor;
-   private final boolean flushOnCheckpoint;
-   private final AtomicLong numPendingRequestsRef;
-
-   BulkProcessorIndexer(BulkProcessor bulkProcessor, boolean 
flushOnCheckpoint, AtomicLong numPendingRequestsRef) {
-   this.bulkProcessor = checkNotNull(bulkProcessor);
-   this.flushOnCheckpoint = flushOnCheckpoint;
-   this.numPendingRequestsRef = 
checkNotNull(numPendingRequestsRef);
-   }
-
-   @Override
-   public void add(DeleteRequest... deleteRequests) {
-   for (DeleteRequest deleteRequest : deleteRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(deleteRequest);
-   }
-   }
-
-   @Override
-   public void add(IndexRequest... indexRequests) {
-   for (IndexRequest indexRequest : indexRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(indexRequest);
-   }
-   }
-
-   @Override
-   public void add(UpdateRequest... updateRequests) {
-   for (UpdateRequest updateRequest : updateRequests) {
-   if (flushOnCheckpoint) {
-   numPendingRequestsRef.getAndIncrement();
-   }
-   this.bulkProcessor.add(updateRequest);
-   }
-   }
-}



[flink] branch master updated: [FLINK-3875] [connectors] Add an upsert table sink factory for Elasticsearch

2018-10-01 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 10f9f1d  [FLINK-3875] [connectors] Add an upsert table sink factory 
for Elasticsearch
10f9f1d is described below

commit 10f9f1d431dbf047ea14130d9fb7fdd4f78178fc
Author: Timo Walther 
AuthorDate: Wed Aug 15 13:51:23 2018 +0200

[FLINK-3875] [connectors] Add an upsert table sink factory for Elasticsearch

This commit adds full support for Elasticsearch to be used with Table & SQL 
API as well as SQL Client.

It includes:
- Elasticsearch 6 upsert table sink (for append-only and updating queries)
- Elasticsearch 6 table factory
- Elasticsearch table descriptors & validators
- Unit tests, SQL Client end-to-end test
- Website documentation

This closes #6611.
---
 docs/dev/table/connect.md  | 106 +
 .../flink-connector-elasticsearch-base/pom.xml |  27 ++
 .../ElasticsearchUpsertTableSinkBase.java  | 522 +
 .../ElasticsearchUpsertTableSinkFactoryBase.java   | 296 
 .../elasticsearch/util/IgnoringFailureHandler.java |  39 ++
 .../flink/table/descriptors/Elasticsearch.java | 315 +
 .../table/descriptors/ElasticsearchValidator.java  | 129 +
 ...lasticsearchUpsertTableSinkFactoryTestBase.java | 189 
 .../flink/table/descriptors/ElasticsearchTest.java | 147 ++
 .../flink-connector-elasticsearch6/pom.xml | 117 -
 .../Elasticsearch6UpsertTableSink.java | 269 +++
 .../Elasticsearch6UpsertTableSinkFactory.java  |  77 +++
 .../elasticsearch6/ElasticsearchSink.java  |  27 ++
 .../org.apache.flink.table.factories.TableFactory  |  16 +
 .../Elasticsearch6UpsertTableSinkFactoryTest.java  | 223 +
 .../flink-sql-client-test/pom.xml  |  15 +
 .../test-scripts/elasticsearch-common.sh   |  25 +-
 .../test-scripts/test_sql_client.sh| 126 -
 .../test-scripts/test_streaming_elasticsearch.sh   |   2 +-
 .../flink/table/api/StreamTableEnvironment.scala   |   2 +-
 .../table/descriptors/DescriptorProperties.scala   | 118 -
 .../table/descriptors/StreamTableDescriptor.scala  |   4 -
 .../StreamTableDescriptorValidator.scala   |  21 +-
 .../flink/table/sinks/UpsertStreamTableSink.scala  |   3 +-
 .../flink/table/typeutils/TypeCheckUtils.scala |   6 +
 tools/travis_mvn_watchdog.sh   |   2 +-
 26 files changed, 2795 insertions(+), 28 deletions(-)

diff --git a/docs/dev/table/connect.md b/docs/dev/table/connect.md
index f79dd93..0b7d894 100644
--- a/docs/dev/table/connect.md
+++ b/docs/dev/table/connect.md
@@ -43,6 +43,7 @@ The following table list all available connectors and 
formats. Their mutual comp
 | Name  | Version   | Maven dependency | SQL 
Client JAR |
 | : | : | :--- | 
:--|
 | Filesystem|   | Built-in | Built-in  
 |
+| Elasticsearch | 6 | `flink-connector-elasticsearch6` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-elasticsearch6{{site.scala_version_suffix}}/{{site.version}}/flink-connector-elasticsearch6{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
 | Apache Kafka  | 0.8   | `flink-connector-kafka-0.8`  | Not 
available  |
 | Apache Kafka  | 0.9   | `flink-connector-kafka-0.9`  | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.9{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.9{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
 | Apache Kafka  | 0.10  | `flink-connector-kafka-0.10` | 
[Download](http://central.maven.org/maven2/org/apache/flink/flink-connector-kafka-0.10{{site.scala_version_suffix}}/{{site.version}}/flink-connector-kafka-0.10{{site.scala_version_suffix}}-{{site.version}}-sql-jar.jar)
 |
@@ -588,6 +589,111 @@ Make sure to add the version-specific Kafka dependency. 
In addition, a correspon
 
 {% top %}
 
+### Elasticsearch Connector
+
+Sink: Streaming Append Mode
+Sink: Streaming Upsert Mode
+Format: JSON-only
+
+The Elasticsearch connector allows for writing into an index of the 
Elasticsearch search engine.
+
+The connector can operate in [upsert mode](#update-modes) for exchanging 
UPSERT/DELETE messages with the external system using a [key defined by the 
query](streaming.html#table-to-stream-conversion).
+
+For append-only queries, the connector can also operate in [append 
mode](#update-modes) for exchanging only INSERT messages with the external 
system. If no key is defined by the query, a key is automatically generated by 
Elast

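For context, the new Elasticsearch descriptor is meant to be used through the Table API's connect() entry point, analogous to the existing Kafka descriptor. The following is a rough Java sketch based on the documentation added by this commit; host, index and field names are illustrative, and the exact builder methods should be checked against the shipped descriptors.

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Elasticsearch;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Schema;

public class RegisterElasticsearchSink {

    // Sketch only: descriptor method names follow the documentation added by this
    // commit; connection details and the schema are made up for illustration.
    public static void register(StreamTableEnvironment tableEnv) {
        tableEnv
            .connect(
                new Elasticsearch()
                    .version("6")
                    .host("localhost", 9200, "http")
                    .index("MyUsers")
                    .documentType("user"))
            .withFormat(new Json().deriveSchema())
            .withSchema(
                new Schema()
                    .field("user", Types.STRING)
                    .field("cnt", Types.LONG))
            .inUpsertMode()
            .registerTableSink("EsUpsertSink");
    }
}

In upsert mode the sink uses the key defined by the query to issue upsert and delete requests, while append mode only emits plain index requests for insert-only queries.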
[flink] branch master updated: [FLINK-10451] [table] TableFunctionCollector should handle the life cycle of ScalarFunction

2018-10-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 22613b7  [FLINK-10451] [table] TableFunctionCollector should handle 
the life cycle of ScalarFunction
22613b7 is described below

commit 22613b7392f6f9b344291bb3a5cec84c9aa40926
Author: Xpray 
AuthorDate: Fri Sep 28 16:34:05 2018 +0800

[FLINK-10451] [table] TableFunctionCollector should handle the life cycle 
of ScalarFunction

This closes #6771.
---
 .../table/codegen/CollectorCodeGenerator.scala | 12 +++--
 .../flink/table/plan/nodes/CommonCorrelate.scala   | 17 +++--
 .../table/runtime/CRowCorrelateProcessRunner.scala |  3 +++
 .../table/runtime/CorrelateFlatMapRunner.scala |  3 +++
 .../table/runtime/TableFunctionCollector.scala |  3 ++-
 .../utils/userDefinedScalarFunctions.scala | 17 +
 .../runtime/batch/table/CorrelateITCase.scala  | 29 +-
 .../runtime/stream/table/CorrelateITCase.scala | 29 +-
 8 files changed, 100 insertions(+), 13 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
index 9fc76e3..85d858f 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
@@ -18,6 +18,7 @@
 package org.apache.flink.table.codegen
 
 import org.apache.flink.api.common.typeinfo.TypeInformation
+import org.apache.flink.configuration.Configuration
 import org.apache.flink.table.api.TableConfig
 import org.apache.flink.table.codegen.CodeGenUtils.{boxedTypeTermForTypeInfo, 
newName}
 import org.apache.flink.table.codegen.Indenter.toISC
@@ -63,7 +64,8 @@ class CollectorCodeGenerator(
   def generateTableFunctionCollector(
   name: String,
   bodyCode: String,
-  collectedType: TypeInformation[Any])
+  collectedType: TypeInformation[Any],
+  codeGenerator: CodeGenerator)
 : GeneratedCollector = {
 
 val className = newName(name)
@@ -95,6 +97,11 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
+  |  public void open(${classOf[Configuration].getCanonicalName} 
parameters) throws Exception {
+  |${codeGenerator.reuseOpenCode()}
+  |  }
+  |
+  |  @Override
   |  public void collect(Object record) throws Exception {
   |super.collect(record);
   |$input1TypeClass $input1Term = ($input1TypeClass) getInput();
@@ -105,7 +112,8 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
-  |  public void close() {
+  |  public void close() throws Exception {
+  |${codeGenerator.reuseCloseCode()}
   |  }
   |}
   |""".stripMargin
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
index 4331457..3475e19 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
@@ -136,6 +136,13 @@ trait CommonCorrelate {
   returnSchema.typeInfo,
   returnSchema.fieldNames)
 
+val filterGenerator = new FunctionCodeGenerator(
+  config,
+  false,
+  udtfTypeInfo,
+  None,
+  pojoFieldMapping)
+
 val collectorCode = if (condition.isEmpty) {
   s"""
  |${crossResultExpr.code}
@@ -153,13 +160,6 @@ trait CommonCorrelate {
   //   The generated expression is discarded.
   
generator.generateExpression(condition.get.accept(changeInputRefIndexShuttle))
 
-  val filterGenerator = new FunctionCodeGenerator(
-config,
-false,
-udtfTypeInfo,
-None,
-pojoFieldMapping)
-
   filterGenerator.input1Term = filterGenerator.input2Term
   val filterCondition = filterGenerator.generateExpression(condition.get)
   s"""
@@ -175,7 +175,8 @@ trait CommonCorrelate {
 generator.generateTableFunctionCollector(
   "TableFunctionCollector",
   collectorCode,
-  udtfTypeInfo)
+  udtfTypeInfo,
+  filterGenerator)
   }
 
   private[flink] def selectToString(rowType: RelDataType): String = {
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
index 2553d9c

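The practical effect of the fix is that scalar functions used inside the condition of a table function join (correlate) now have their open()/close() lifecycle methods invoked by the generated collector. A sketch of the kind of function this matters for; the class and its initialization logic are made up for illustration.

import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.ScalarFunction;

public class PrefixMatcher extends ScalarFunction {

    private transient String prefix;

    @Override
    public void open(FunctionContext context) throws Exception {
        // Initialize state or external resources here; after this fix the generated
        // TableFunctionCollector calls open() as well, so the field is always set.
        prefix = "flink";
    }

    public boolean eval(String value) {
        return value != null && value.startsWith(prefix);
    }

    @Override
    public void close() throws Exception {
        // Release resources; also invoked by the generated collector after the fix.
        prefix = null;
    }
}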
[flink] branch release-1.6 updated: [FLINK-10451] [table] TableFunctionCollector should handle the life cycle of ScalarFunction

2018-10-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.6
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.6 by this push:
 new 7fb980e  [FLINK-10451] [table] TableFunctionCollector should handle 
the life cycle of ScalarFunction
7fb980e is described below

commit 7fb980ec74b95d7e6e27d6414af54deb9010e134
Author: Xpray 
AuthorDate: Fri Sep 28 16:34:05 2018 +0800

[FLINK-10451] [table] TableFunctionCollector should handle the life cycle 
of ScalarFunction

This closes #6771.
---
 .../table/codegen/CollectorCodeGenerator.scala | 12 +++--
 .../flink/table/plan/nodes/CommonCorrelate.scala   | 17 +++--
 .../table/runtime/CRowCorrelateProcessRunner.scala |  3 +++
 .../table/runtime/CorrelateFlatMapRunner.scala |  3 +++
 .../table/runtime/TableFunctionCollector.scala |  3 ++-
 .../utils/userDefinedScalarFunctions.scala | 17 +
 .../runtime/batch/table/CorrelateITCase.scala  | 29 +-
 .../runtime/stream/table/CorrelateITCase.scala | 29 +-
 8 files changed, 100 insertions(+), 13 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
index 9fc76e3..85d858f 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
@@ -18,6 +18,7 @@
 package org.apache.flink.table.codegen
 
 import org.apache.flink.api.common.typeinfo.TypeInformation
+import org.apache.flink.configuration.Configuration
 import org.apache.flink.table.api.TableConfig
 import org.apache.flink.table.codegen.CodeGenUtils.{boxedTypeTermForTypeInfo, 
newName}
 import org.apache.flink.table.codegen.Indenter.toISC
@@ -63,7 +64,8 @@ class CollectorCodeGenerator(
   def generateTableFunctionCollector(
   name: String,
   bodyCode: String,
-  collectedType: TypeInformation[Any])
+  collectedType: TypeInformation[Any],
+  codeGenerator: CodeGenerator)
 : GeneratedCollector = {
 
 val className = newName(name)
@@ -95,6 +97,11 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
+  |  public void open(${classOf[Configuration].getCanonicalName} 
parameters) throws Exception {
+  |${codeGenerator.reuseOpenCode()}
+  |  }
+  |
+  |  @Override
   |  public void collect(Object record) throws Exception {
   |super.collect(record);
   |$input1TypeClass $input1Term = ($input1TypeClass) getInput();
@@ -105,7 +112,8 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
-  |  public void close() {
+  |  public void close() throws Exception {
+  |${codeGenerator.reuseCloseCode()}
   |  }
   |}
   |""".stripMargin
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
index 4331457..3475e19 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
@@ -136,6 +136,13 @@ trait CommonCorrelate {
   returnSchema.typeInfo,
   returnSchema.fieldNames)
 
+val filterGenerator = new FunctionCodeGenerator(
+  config,
+  false,
+  udtfTypeInfo,
+  None,
+  pojoFieldMapping)
+
 val collectorCode = if (condition.isEmpty) {
   s"""
  |${crossResultExpr.code}
@@ -153,13 +160,6 @@ trait CommonCorrelate {
   //   The generated expression is discarded.
   
generator.generateExpression(condition.get.accept(changeInputRefIndexShuttle))
 
-  val filterGenerator = new FunctionCodeGenerator(
-config,
-false,
-udtfTypeInfo,
-None,
-pojoFieldMapping)
-
   filterGenerator.input1Term = filterGenerator.input2Term
   val filterCondition = filterGenerator.generateExpression(condition.get)
   s"""
@@ -175,7 +175,8 @@ trait CommonCorrelate {
 generator.generateTableFunctionCollector(
   "TableFunctionCollector",
   collectorCode,
-  udtfTypeInfo)
+  udtfTypeInfo,
+  filterGenerator)
   }
 
   private[flink] def selectToString(rowType: RelDataType): String = {
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
ind

[flink] branch release-1.5 updated: [FLINK-10451] [table] TableFunctionCollector should handle the life cycle of ScalarFunction

2018-10-02 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch release-1.5
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.5 by this push:
 new 9a1f0c5  [FLINK-10451] [table] TableFunctionCollector should handle 
the life cycle of ScalarFunction
9a1f0c5 is described below

commit 9a1f0c5e05ef50f115c8f92aac9fc156cdf54249
Author: Xpray 
AuthorDate: Fri Sep 28 16:34:05 2018 +0800

[FLINK-10451] [table] TableFunctionCollector should handle the life cycle 
of ScalarFunction

This closes #6771.
---
 .../table/codegen/CollectorCodeGenerator.scala | 12 +++--
 .../flink/table/plan/nodes/CommonCorrelate.scala   | 17 +++--
 .../table/runtime/CRowCorrelateProcessRunner.scala |  3 +++
 .../table/runtime/CorrelateFlatMapRunner.scala |  3 +++
 .../table/runtime/TableFunctionCollector.scala |  3 ++-
 .../utils/userDefinedScalarFunctions.scala | 17 +
 .../runtime/batch/table/CorrelateITCase.scala  | 29 +-
 .../runtime/stream/table/CorrelateITCase.scala | 29 +-
 8 files changed, 100 insertions(+), 13 deletions(-)

diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
index 9fc76e3..85d858f 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/CollectorCodeGenerator.scala
@@ -18,6 +18,7 @@
 package org.apache.flink.table.codegen
 
 import org.apache.flink.api.common.typeinfo.TypeInformation
+import org.apache.flink.configuration.Configuration
 import org.apache.flink.table.api.TableConfig
 import org.apache.flink.table.codegen.CodeGenUtils.{boxedTypeTermForTypeInfo, 
newName}
 import org.apache.flink.table.codegen.Indenter.toISC
@@ -63,7 +64,8 @@ class CollectorCodeGenerator(
   def generateTableFunctionCollector(
   name: String,
   bodyCode: String,
-  collectedType: TypeInformation[Any])
+  collectedType: TypeInformation[Any],
+  codeGenerator: CodeGenerator)
 : GeneratedCollector = {
 
 val className = newName(name)
@@ -95,6 +97,11 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
+  |  public void open(${classOf[Configuration].getCanonicalName} 
parameters) throws Exception {
+  |${codeGenerator.reuseOpenCode()}
+  |  }
+  |
+  |  @Override
   |  public void collect(Object record) throws Exception {
   |super.collect(record);
   |$input1TypeClass $input1Term = ($input1TypeClass) getInput();
@@ -105,7 +112,8 @@ class CollectorCodeGenerator(
   |  }
   |
   |  @Override
-  |  public void close() {
+  |  public void close() throws Exception {
+  |${codeGenerator.reuseCloseCode()}
   |  }
   |}
   |""".stripMargin
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
index 4331457..3475e19 100644
--- 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
+++ 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/CommonCorrelate.scala
@@ -136,6 +136,13 @@ trait CommonCorrelate {
   returnSchema.typeInfo,
   returnSchema.fieldNames)
 
+val filterGenerator = new FunctionCodeGenerator(
+  config,
+  false,
+  udtfTypeInfo,
+  None,
+  pojoFieldMapping)
+
 val collectorCode = if (condition.isEmpty) {
   s"""
  |${crossResultExpr.code}
@@ -153,13 +160,6 @@ trait CommonCorrelate {
   //   The generated expression is discarded.
   
generator.generateExpression(condition.get.accept(changeInputRefIndexShuttle))
 
-  val filterGenerator = new FunctionCodeGenerator(
-config,
-false,
-udtfTypeInfo,
-None,
-pojoFieldMapping)
-
   filterGenerator.input1Term = filterGenerator.input2Term
   val filterCondition = filterGenerator.generateExpression(condition.get)
   s"""
@@ -175,7 +175,8 @@ trait CommonCorrelate {
 generator.generateTableFunctionCollector(
   "TableFunctionCollector",
   collectorCode,
-  udtfTypeInfo)
+  udtfTypeInfo,
+  filterGenerator)
   }
 
   private[flink] def selectToString(rowType: RelDataType): String = {
diff --git 
a/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
 
b/flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/CRowCorrelateProcessRunner.scala
ind

[flink] 02/03: [FLINK-9712][docs, table] Document processing time Temporal Table Joins

2018-10-10 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 6dad790b6eb84b8faafafc2f8506099a7fc08d2b
Author: Piotr Nowojski 
AuthorDate: Thu Sep 20 09:43:42 2018 +0200

[FLINK-9712][docs,table] Document processing time Temporal Table Joins
---
 docs/dev/table/sql.md   |  25 +++
 docs/dev/table/streaming/index.md   |   6 +-
 docs/dev/table/streaming/joins.md   | 104 ++
 docs/dev/table/streaming/temporal_tables.md | 286 
 docs/dev/table/tableApi.md  |  50 +
 5 files changed, 467 insertions(+), 4 deletions(-)

diff --git a/docs/dev/table/sql.md b/docs/dev/table/sql.md
index b490515..6266fd1 100644
--- a/docs/dev/table/sql.md
+++ b/docs/dev/table/sql.md
@@ -502,6 +502,31 @@ FROM Orders LEFT JOIN LATERAL TABLE(unnest_udtf(tags)) t 
AS tag ON TRUE
 Note: Currently, only literal TRUE is supported 
as predicate for a left outer join against a lateral table.
   
 
+
+  
+Join with Temporal Table
+Streaming
+  
+  
+Temporal Tables are 
tables that track changes over time.
+A Temporal Table 
Function provides access to the state of a temporal table at a specific 
point in time.
+The syntax to join a table with a temporal table function is the same 
as in Join with Table Functions.
+
+Currently only inner joins with temporal tables are supported.
+Assuming Rates is a Temporal Table 
Function
+{% highlight sql %}
+SELECT
+  o_amount, r_rate
+FROM
+  Orders,
+  LATERAL TABLE (Rates(o_proctime))
+WHERE
+  r_currency = o_currency
+{% endhighlight %}
+For more information please check the more detailed Temporal Tables concept 
description.
+  
+
+
   
 
 
diff --git a/docs/dev/table/streaming/index.md 
b/docs/dev/table/streaming/index.md
index dbeed32..1d8821d 100644
--- a/docs/dev/table/streaming/index.md
+++ b/docs/dev/table/streaming/index.md
@@ -37,8 +37,6 @@ In the following pages, we explain concepts, practical 
limitations, and stream-s
 
 * [Dynamic Tables]({{ site.baseurl 
}}/dev/table/streaming/dynamic_tables.html): Describes the concept of Dynamic 
Tables.
 * [Time attributes]({{ site.baseurl 
}}/dev/table/streaming/time_attributes.html): How time attributes are handled 
in Table API & SQL.
+* [Joins in Continuous Queries]({{ site.baseurl 
}}/dev/table/streaming/joins.html): Different supported types of Joins in 
Continuous Queries.
+* [Temporal Tables]({{ site.baseurl 
}}/dev/table/streaming/temporal_tables.html): Describes the Temporal Table 
concept.
 * [Query configuration]({{ site.baseurl 
}}/dev/table/streaming/query_configuration.html): Lists Table API & SQL 
specific configuration options.
-
-
-
-
diff --git a/docs/dev/table/streaming/joins.md 
b/docs/dev/table/streaming/joins.md
new file mode 100644
index 000..cd32bce
--- /dev/null
+++ b/docs/dev/table/streaming/joins.md
@@ -0,0 +1,104 @@
+---
+title: "Joins in Continuous Queries"
+nav-parent_id: streaming_tableapi
+nav-pos: 3
+---
+
+
+When we have two tables that we want to connect, such an operation can usually be expressed via some kind of join.
+In batch processing, joins can be executed efficiently, since we work on bounded, complete data sets.
+In stream processing, things are a little more complicated,
+especially when it comes to handling the fact that data can change over time.
+Because of that, there are a couple of ways to actually perform the join using either the Table API or SQL.
+
+For more information regarding the syntax please check the Joins sections in 
[Table API](../tableApi.html#joins) and [SQL](../sql.html#joins).
+
+* This will be replaced by the TOC
+{:toc}
+
+Regular Joins
+-
+
+This is the most basic case in which any new record or change to either side of the join input is visible
+and affects the whole join result.
+For example, if there is a new record on the left side,
+it will be joined with all of the previous and future records on the other side.
+
+These semantics have an important implication:
+the join requires keeping both sides of the join input in state indefinitely,
+and resource usage will grow indefinitely as well
+if one or both input tables are continuously growing.
+
+Example:
+{% highlight sql %}
+SELECT * FROM Orders
+INNER JOIN Product
+ON Orders.productId = Product.id
+{% endhighlight %}
+
+Time-windowed Joins
+---
+
+A time-windowed join is defined by a join predicate
+that checks whether [the time attributes](time_attributes.html) of the input records are within a time window.
+Since time attributes are quasi-monotonically increasing,
+Flink can remove old values from the state without affecting the correctness 
of the result.
+
+Example:
+{% highlight sql %}
+SELECT *
+FROM
+  Orders o,
+  Shipments s
+WHERE o.

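The Rates function referenced in the SQL example above is a temporal table function that has to be created and registered beforehand. A minimal Java sketch following the temporal tables documentation added by this commit; the RatesHistory table and its columns (r_proctime, r_currency) are assumed to be registered already.

import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.functions.TemporalTableFunction;

public class RegisterRates {

    public static void register(StreamTableEnvironment tableEnv) {
        // An append-only table with a processing-time attribute, assumed to exist.
        Table ratesHistory = tableEnv.scan("RatesHistory");

        // Version the history over r_proctime and key it by r_currency.
        TemporalTableFunction rates =
            ratesHistory.createTemporalTableFunction("r_proctime", "r_currency");

        // Make it available to SQL queries as Rates(o_proctime), as used in the example above.
        tableEnv.registerFunction("Rates", rates);
    }
}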
[flink] branch master updated (9b0bf69 -> 6f5ff2f)

2018-10-10 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 9b0bf69  [FLINK-10316] [kinesis] bug was preventing 
FlinkKinesisProducer to connect to Kinesalite
 new d432ca0  [hotfix][docs,table] Split Streaming Concepts page into 
multiple documents.
 new 6dad790  [FLINK-9712][docs,table] Document processing time Temporal 
Table Joins
 new 6f5ff2f  [hotfix] [docs] Improve table streaming docs section

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 docs/dev/table/common.md|   2 +-
 docs/dev/table/connect.md   |  14 +-
 docs/dev/table/functions.md |  10 +-
 docs/dev/table/index.md |  11 +-
 docs/dev/table/sourceSinks.md   |   6 +-
 docs/dev/table/sql.md   |  67 ++-
 docs/dev/table/sqlClient.md |   2 +-
 docs/dev/table/streaming.md | 603 
 docs/dev/table/streaming/dynamic_tables.md  | 189 
 docs/dev/table/streaming/index.md   |  42 ++
 docs/dev/table/streaming/joins.md   | 202 
 docs/dev/table/streaming/query_configuration.md | 130 +
 docs/dev/table/streaming/temporal_tables.md | 189 
 docs/dev/table/streaming/time_attributes.md | 336 +
 docs/dev/table/tableApi.md  | 118 +++--
 15 files changed, 1245 insertions(+), 676 deletions(-)
 delete mode 100644 docs/dev/table/streaming.md
 create mode 100644 docs/dev/table/streaming/dynamic_tables.md
 create mode 100644 docs/dev/table/streaming/index.md
 create mode 100644 docs/dev/table/streaming/joins.md
 create mode 100644 docs/dev/table/streaming/query_configuration.md
 create mode 100644 docs/dev/table/streaming/temporal_tables.md
 create mode 100644 docs/dev/table/streaming/time_attributes.md


