[flink] branch master updated: [hotfix][table-api] Remove deprecated table function code

2019-03-04 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new dcd8c74  [hotfix][table-api] Remove deprecated table function code
dcd8c74 is described below

commit dcd8c74b504046802cebf278b718e4753928a260
Author: sunjincheng121 
AuthorDate: Wed Feb 20 20:09:46 2019 +0800

[hotfix][table-api] Remove deprecated table function code

This commit removes the table constructor (deprecated in
FLINK-11447) for lateral table function joins and simplifies
related code.
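
    For migration context, a minimal Java sketch of the joinLateral() style that
    replaces the removed implicit conversion. The Split function, the field names,
    and the tableEnv/input variables are assumptions for illustration only; they
    are not part of this commit.

    import org.apache.flink.table.api.Table;
    import org.apache.flink.table.functions.TableFunction;

    // Hypothetical table function: emits one row per whitespace-separated token.
    public class Split extends TableFunction<String> {
        public void eval(String line) {
            for (String token : line.split("\\s+")) {
                collect(token);
            }
        }
    }

    // Usage sketch, assuming a table environment 'tableEnv' and a table 'input'
    // with a String column 'line':
    //   tableEnv.registerFunction("split", new Split());
    //   Table result = input
    //       .joinLateral("split(line) as token")
    //       .select("line, token");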
---
 .../flink/table/api/scala/expressionDsl.scala  |   8 --
 .../scala/org/apache/flink/table/api/table.scala   | 143 +++--
 .../expressions/PlannerExpressionConverter.scala   |   8 +-
 .../org/apache/flink/table/expressions/call.scala  |  65 +-
 .../functions/utils/UserDefinedFunctionUtils.scala |  29 -
 .../api/batch/table/TemporalTableJoinTest.scala|  11 --
 .../stringexpr/CorrelateStringExpressionTest.scala |  62 -
 .../table/validation/CorrelateValidationTest.scala |   2 +-
 .../api/stream/table/TemporalTableJoinTest.scala   |  14 +-
 .../stringexpr/CorrelateStringExpressionTest.scala |  62 -
 .../table/validation/CorrelateValidationTest.scala |  89 +
 .../table/plan/TimeIndicatorConversionTest.scala   |   6 +-
 .../table/runtime/batch/table/JoinITCase.scala |   2 +-
 13 files changed, 55 insertions(+), 446 deletions(-)

diff --git a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
index ed0f4c1..23d54a5 100644
--- a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
+++ b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala
@@ -1071,14 +1071,6 @@ trait ImplicitExpressionConversions {
 }
   }
 
-  @deprecated("Please use Table.joinLateral() or Table.leftOuterJoinLateral() 
instead.", "1.8")
-  implicit def tableFunctionCall2Table(tfc: TableFunctionCall): Table = {
-new Table(
-  tableEnv = null, // table environment will be set later.
-  tfc.toLogicalTableFunctionCall(child = null) // child will be set later.
-)
-  }
-
   implicit def symbol2FieldExpression(sym: Symbol): Expression = UnresolvedFieldReference(sym.name)
   implicit def byte2Literal(b: Byte): Expression = Literal(b)
   implicit def short2Literal(s: Short): Expression = Literal(s)
diff --git a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/table.scala b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/table.scala
index e2f1161..44dac79 100644
--- a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/table.scala
+++ b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/table.scala
@@ -21,7 +21,7 @@ import org.apache.calcite.rel.RelNode
 import org.apache.flink.api.common.typeinfo.TypeInformation
 import org.apache.flink.api.java.operators.join.JoinType
 import org.apache.flink.table.calcite.{FlinkRelBuilder, FlinkTypeFactory}
-import org.apache.flink.table.expressions.{Alias, Asc, Expression, ExpressionParser, Ordering, ResolvedFieldReference, UnresolvedAlias, UnresolvedFieldReference, WindowProperty}
+import org.apache.flink.table.expressions.{Alias, Asc, Expression, ExpressionParser, Ordering, ResolvedFieldReference, UnresolvedAlias, WindowProperty}
 import org.apache.flink.table.functions.TemporalTableFunction
 import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils
 import org.apache.flink.table.plan.ProjectionTranslator._
@@ -65,44 +65,13 @@ class Table(
 private[flink] val tableEnv: TableEnvironment,
 private[flink] val logicalPlan: LogicalNode) {
 
-  // Check if the plan has an unbounded TableFunctionCall as child node.
-  //   A TableFunctionCall is tolerated as root node because the Table holds the initial call.
-  if (containsUnboundedUDTFCall(logicalPlan) &&
-    !logicalPlan.isInstanceOf[LogicalTableFunctionCall]) {
-    throw new ValidationException(
-      "Table functions can only be used in table.joinLateral() and table.leftOuterJoinLateral().")
-  }
-
-  /**
-    * Creates a [[Table]] for a TableFunction call from a String expression.
-    *
-    * @param tableEnv The TableEnvironment in which the call is created.
-    * @param tableFunctionCall A string expression of a table function call.
-    *
-    * @deprecated This constructor will be removed. Use table.joinLateral() or
-    * table.leftOuterJoinLateral() instead.
-    */
-  @Deprecated
-  @deprecated(
-    "This constructor will be removed. Use table.joinLateral() or " +
-      "table.leftOuterJoinLateral() instead.",
-

[flink] 02/02: [FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git

commit d5364dff200728d8611e9cdf0e8f798de145adcf
Author: Yu Li 
AuthorDate: Thu Feb 28 20:02:35 2019 +0100

[FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

This closes #7866.

(cherry picked from commit 237d07c76b51c171f0f41f9f82d777df26da1dd4)
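
For context, a minimal sketch of the builder shape this commit introduces. The
names below are hypothetical; the real HeapKeyedStateBackendBuilder takes many
more arguments and restores actual state handles.

    import java.util.Collections;
    import java.util.List;

    // Sketch only: the backend is constructed and restored inside build(), so a
    // half-initialized backend is never handed out.
    final class SketchBackend {
        final boolean asynchronousSnapshots;

        SketchBackend(boolean asynchronousSnapshots) {
            this.asynchronousSnapshots = asynchronousSnapshots;
        }
    }

    final class SketchBackendBuilder {
        private boolean asynchronousSnapshots = true;
        private List<String> restoreHandles = Collections.emptyList();

        SketchBackendBuilder setAsynchronousSnapshots(boolean async) {
            this.asynchronousSnapshots = async;
            return this;
        }

        SketchBackendBuilder setRestoreHandles(List<String> handles) {
            this.restoreHandles = handles;
            return this;
        }

        SketchBackend build() {
            SketchBackend backend = new SketchBackend(asynchronousSnapshots);
            for (String handle : restoreHandles) {
                // apply previously snapshotted state to the new backend (elided)
            }
            return backend;
        }
    }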
---
 .../KVStateRequestSerializerRocksDBTest.java   |   5 +-
 .../network/KvStateRequestSerializerTest.java  |  57 +--
 .../flink/runtime/state/AbstractStateBackend.java  |   9 +
 .../flink/runtime/state/RestoreOperation.java  |  10 +-
 .../runtime/state/filesystem/FsStateBackend.java   |  34 +-
 ...AsyncSnapshotStrategySynchronicityBehavior.java |  26 +-
 .../runtime/state/heap/HeapKeyedStateBackend.java  | 546 ++---
 .../state/heap/HeapKeyedStateBackendBuilder.java   | 152 ++
 .../runtime/state/heap/HeapRestoreOperation.java   | 293 +++
 .../runtime/state/heap/HeapSnapshotStrategy.java   | 271 ++
 .../SnapshotStrategySynchronicityBehavior.java |  24 +-
 .../apache/flink/runtime/state/heap/StateUID.java  |  73 +++
 .../SyncSnapshotStrategySynchronicityBehavior.java |  32 +-
 .../runtime/state/memory/MemoryStateBackend.java   |  34 +-
 .../state/StateBackendMigrationTestBase.java   |   2 -
 .../flink/runtime/state/StateBackendTestBase.java  |  14 +-
 .../state/StateSnapshotCompressionTest.java|  89 ++--
 ...HeapKeyedStateBackendSnapshotMigrationTest.java |  25 +-
 .../state/heap/HeapStateBackendTestBase.java   |  23 +-
 .../runtime/state/ttl/StateBackendTestContext.java |  13 -
 .../flink/runtime/state/ttl/TtlStateTestBase.java  |   3 -
 .../state/ttl/mock/MockKeyedStateBackend.java  |  24 +-
 .../ttl/mock/MockKeyedStateBackendBuilder.java |  85 
 .../state/ttl/mock/MockRestoreOperation.java   |  53 ++
 .../runtime/state/ttl/mock/MockStateBackend.java   |   8 +-
 .../streaming/state/RocksDBStateBackend.java   |  10 -
 .../state/restore/RocksDBRestoreOperation.java |   4 +-
 .../streaming/state/RocksDBStateBackendTest.java   |   5 +-
 28 files changed, 1188 insertions(+), 736 deletions(-)

diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
index 3431199..a5df958 100644
--- a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
+++ b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
@@ -32,6 +32,7 @@ import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
 import org.apache.flink.queryablestate.client.VoidNamespace;
 import org.apache.flink.queryablestate.client.VoidNamespaceSerializer;
 import org.apache.flink.runtime.query.TaskKvStateRegistry;
+import org.apache.flink.runtime.state.AbstractStateBackend;
 import org.apache.flink.runtime.state.KeyGroupRange;
 import org.apache.flink.runtime.state.TestLocalRecoveryConfig;
 import org.apache.flink.runtime.state.internal.InternalListState;
@@ -88,7 +89,7 @@ public final class KVStateRequestSerializerRocksDBTest {
 			TtlTimeProvider.DEFAULT,
 			new UnregisteredMetricsGroup(),
 			Collections.emptyList(),
-			RocksDBStateBackend.getCompressionDecorator(executionConfig),
+			AbstractStateBackend.getCompressionDecorator(executionConfig),
 			new CloseableRegistry()
 		).build();
 		longHeapKeyedStateBackend.setCurrentKey(key);
@@ -132,7 +133,7 @@ public final class KVStateRequestSerializerRocksDBTest {
 			TtlTimeProvider.DEFAULT,
 			new UnregisteredMetricsGroup(),
 			Collections.emptyList(),
-			RocksDBStateBackend.getCompressionDecorator(executionConfig),
+			AbstractStateBackend.getCompressionDecorator(executionConfig),
 			new CloseableRegistry()
 		).build();
 		longHeapKeyedStateBackend.setCurrentKey(key);
diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
index aac3394..2ad202f 100644
--- 

[flink] branch release-1.8 updated (8a5c972 -> d5364df)

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a change to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 8a5c972  [hotfix][tests] Added matcher that performs deep comparison with taking Tuples into account
 new e177ba9  [FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task
 new d5364df  [FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../flink/streaming/tests/StubStateBackend.java|   9 +-
 .../KVStateRequestSerializerRocksDBTest.java   |   8 +-
 .../network/KvStateRequestSerializerTest.java  |  58 ++-
 .../network/KvStateServerHandlerTest.java  |   4 +-
 .../state/AbstractKeyedStateBackendBuilder.java|   6 +-
 .../flink/runtime/state/AbstractStateBackend.java  |  13 +-
 .../flink/runtime/state/RestoreOperation.java  |  10 +-
 .../apache/flink/runtime/state/StateBackend.java   |  10 +-
 .../runtime/state/filesystem/FsStateBackend.java   |  35 +-
 ...syncSnapshotStrategySynchronicityBehavior.java} |  24 +-
 .../runtime/state/heap/HeapKeyedStateBackend.java  | 559 ++---
 .../state/heap/HeapKeyedStateBackendBuilder.java   | 152 ++
 .../runtime/state/heap/HeapRestoreOperation.java   | 293 +++
 .../runtime/state/heap/HeapSnapshotStrategy.java   | 271 ++
 .../SnapshotStrategySynchronicityBehavior.java}|  28 +-
 .../apache/flink/runtime/state/heap/StateUID.java  |  73 +++
 ...SyncSnapshotStrategySynchronicityBehavior.java} |  30 +-
 .../runtime/state/memory/MemoryStateBackend.java   |  36 +-
 .../CheckpointSettingsSerializableTest.java|   5 +-
 .../state/StateBackendMigrationTestBase.java   |   2 -
 .../flink/runtime/state/StateBackendTestBase.java  |  14 +-
 .../state/StateSnapshotCompressionTest.java|  86 ++--
 ...HeapKeyedStateBackendSnapshotMigrationTest.java |  25 +-
 .../state/heap/HeapStateBackendTestBase.java   |  23 +-
 .../runtime/state/ttl/StateBackendTestContext.java |  13 -
 .../flink/runtime/state/ttl/TtlStateTestBase.java  |   3 -
 .../state/ttl/mock/MockKeyedStateBackend.java  |  27 +-
 .../ttl/mock/MockKeyedStateBackendBuilder.java |  85 
 .../state/ttl/mock/MockRestoreOperation.java   |  53 ++
 .../runtime/state/ttl/mock/MockStateBackend.java   |  12 +-
 .../state/RocksDBKeyedStateBackendBuilder.java |  21 +-
 .../streaming/state/RocksDBStateBackend.java   |  17 +-
 .../state/restore/RocksDBRestoreOperation.java |   4 +-
 .../streaming/state/RocksDBStateBackendTest.java   |  14 +-
 .../operators/StreamTaskStateInitializerImpl.java  |  18 +-
 .../StreamTaskStateInitializerImplTest.java|   6 +-
 .../runtime/tasks/StreamTaskTerminationTest.java   |   6 +-
 .../runtime/tasks/TestSpyWrapperStateBackend.java  |  10 +-
 .../test/streaming/runtime/StateBackendITCase.java |   6 +-
 39 files changed, 1286 insertions(+), 783 deletions(-)
 copy flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/restore/RocksDBRestoreOperation.java => flink-runtime/src/main/java/org/apache/flink/runtime/state/RestoreOperation.java (82%)
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/AsyncSnapshotStrategySynchronicityBehavior.java} (62%)
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapKeyedStateBackendBuilder.java
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapRestoreOperation.java
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapSnapshotStrategy.java
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/SnapshotStrategySynchronicityBehavior.java} (63%)
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateUID.java
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/SyncSnapshotStrategySynchronicityBehavior.java} (54%)
 create mode 100644 flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockKeyedStateBackendBuilder.java
 create mode 100644 flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockRestoreOperation.java



[flink] 01/02: [FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git

commit e177ba9bac042d142a8efa989a0b6b7bb2c122cf
Author: Yu Li 
AuthorDate: Fri Mar 1 04:52:14 2019 +0100

[FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task

We need to make sure each stream constructed during restore can also be closed in case of task cancel, for example the data input stream opened for serDe during restore.

Also removed close of CloseableRegistry in RocksDBKeyedStateBackendBuilder.

(cherry picked from commit eada52be5194a018a41e7ea51ea86e0273df2073)
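
To illustrate the intent, a small sketch built on Flink's
org.apache.flink.core.fs.CloseableRegistry; the readState helper and its
arguments are made up for the example.

    import org.apache.flink.core.fs.CloseableRegistry;

    import java.io.IOException;
    import java.io.InputStream;

    final class RestoreReadSketch {

        // Register the restore stream with the task's cancel registry so that
        // cancelling the task closes the stream and unblocks a blocked reader.
        static void readState(InputStream in, CloseableRegistry cancelStreamRegistry) throws IOException {
            cancelStreamRegistry.registerCloseable(in);
            try {
                // ... deserialize state from 'in' (elided) ...
            } finally {
                // Normal path: unregister and close ourselves. On cancel, the
                // registry's close() has already closed 'in'.
                if (cancelStreamRegistry.unregisterCloseable(in)) {
                    in.close();
                }
            }
        }
    }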
---
 .../flink/streaming/tests/StubStateBackend.java |  9 +++--
 .../KVStateRequestSerializerRocksDBTest.java|  7 +--
 .../network/KvStateRequestSerializerTest.java   |  9 +
 .../network/KvStateServerHandlerTest.java   |  4 +++-
 .../state/AbstractKeyedStateBackendBuilder.java |  6 +-
 .../flink/runtime/state/AbstractStateBackend.java   |  4 +++-
 .../apache/flink/runtime/state/StateBackend.java| 10 +++---
 .../runtime/state/filesystem/FsStateBackend.java|  7 +--
 .../runtime/state/heap/HeapKeyedStateBackend.java   | 21 +++--
 .../runtime/state/memory/MemoryStateBackend.java|  8 ++--
 .../CheckpointSettingsSerializableTest.java |  5 -
 .../runtime/state/StateSnapshotCompressionTest.java | 13 +
 .../state/heap/HeapStateBackendTestBase.java|  4 +++-
 .../state/ttl/mock/MockKeyedStateBackend.java   |  5 +++--
 .../runtime/state/ttl/mock/MockStateBackend.java|  8 ++--
 .../state/RocksDBKeyedStateBackendBuilder.java  | 21 -
 .../streaming/state/RocksDBStateBackend.java|  7 +--
 .../streaming/state/RocksDBStateBackendTest.java|  9 +
 .../operators/StreamTaskStateInitializerImpl.java   | 18 +++---
 .../StreamTaskStateInitializerImplTest.java |  6 +-
 .../runtime/tasks/StreamTaskTerminationTest.java|  6 +-
 .../runtime/tasks/TestSpyWrapperStateBackend.java   | 10 --
 .../test/streaming/runtime/StateBackendITCase.java  |  6 +-
 23 files changed, 142 insertions(+), 61 deletions(-)

diff --git a/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java b/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
index 17c51d8..dec4f2d 100644
--- a/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
+++ b/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
@@ -20,6 +20,7 @@ package org.apache.flink.streaming.tests;
 
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.core.fs.CloseableRegistry;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.runtime.execution.Environment;
 import org.apache.flink.runtime.query.TaskKvStateRegistry;
@@ -32,6 +33,8 @@ import org.apache.flink.runtime.state.OperatorStateBackend;
 import org.apache.flink.runtime.state.StateBackend;
 import org.apache.flink.runtime.state.ttl.TtlTimeProvider;
 
+import javax.annotation.Nonnull;
+
 import java.io.IOException;
 import java.util.Collection;
 
@@ -75,7 +78,8 @@ final class StubStateBackend implements StateBackend {
 			TaskKvStateRegistry kvStateRegistry,
 			TtlTimeProvider ttlTimeProvider,
 			MetricGroup metricGroup,
-			Collection<KeyedStateHandle> stateHandles) throws Exception {
+			@Nonnull Collection<KeyedStateHandle> stateHandles,
+			CloseableRegistry cancelStreamRegistry) throws Exception {
 
 		return backend.createKeyedStateBackend(
 			env,
@@ -87,7 +91,8 @@ final class StubStateBackend implements StateBackend {
 			kvStateRegistry,
 			this.ttlTimeProvider,
 			metricGroup,
-			stateHandles);
+			stateHandles,
+			cancelStreamRegistry);
 	}
 
@Override
diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
index de86340..3431199 100644
--- a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
+++ 

[flink] branch master updated (39e1384 -> 237d07c)

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 39e1384  [hotfix][tests] Added matcher that performs deep comparison with taking Tuples into account
 new eada52b  [FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task
 new 237d07c  [FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../flink/streaming/tests/StubStateBackend.java|   9 +-
 .../KVStateRequestSerializerRocksDBTest.java   |   8 +-
 .../network/KvStateRequestSerializerTest.java  |  58 ++-
 .../network/KvStateServerHandlerTest.java  |   4 +-
 .../state/AbstractKeyedStateBackendBuilder.java|   6 +-
 .../flink/runtime/state/AbstractStateBackend.java  |  13 +-
 .../flink/runtime/state/RestoreOperation.java  |  10 +-
 .../apache/flink/runtime/state/StateBackend.java   |  10 +-
 .../runtime/state/filesystem/FsStateBackend.java   |  35 +-
 ...syncSnapshotStrategySynchronicityBehavior.java} |  24 +-
 .../runtime/state/heap/HeapKeyedStateBackend.java  | 559 ++---
 .../state/heap/HeapKeyedStateBackendBuilder.java   | 152 ++
 .../runtime/state/heap/HeapRestoreOperation.java   | 293 +++
 .../runtime/state/heap/HeapSnapshotStrategy.java   | 271 ++
 .../SnapshotStrategySynchronicityBehavior.java}|  28 +-
 .../apache/flink/runtime/state/heap/StateUID.java  |  73 +++
 ...SyncSnapshotStrategySynchronicityBehavior.java} |  30 +-
 .../runtime/state/memory/MemoryStateBackend.java   |  36 +-
 .../CheckpointSettingsSerializableTest.java|   5 +-
 .../state/StateBackendMigrationTestBase.java   |   2 -
 .../flink/runtime/state/StateBackendTestBase.java  |  14 +-
 .../state/StateSnapshotCompressionTest.java|  86 ++--
 ...HeapKeyedStateBackendSnapshotMigrationTest.java |  25 +-
 .../state/heap/HeapStateBackendTestBase.java   |  23 +-
 .../runtime/state/ttl/StateBackendTestContext.java |  13 -
 .../flink/runtime/state/ttl/TtlStateTestBase.java  |   3 -
 .../state/ttl/mock/MockKeyedStateBackend.java  |  27 +-
 .../ttl/mock/MockKeyedStateBackendBuilder.java |  85 
 .../state/ttl/mock/MockRestoreOperation.java   |  53 ++
 .../runtime/state/ttl/mock/MockStateBackend.java   |  12 +-
 .../state/RocksDBKeyedStateBackendBuilder.java |  21 +-
 .../streaming/state/RocksDBStateBackend.java   |  17 +-
 .../state/restore/RocksDBRestoreOperation.java |   4 +-
 .../streaming/state/RocksDBStateBackendTest.java   |  14 +-
 .../operators/StreamTaskStateInitializerImpl.java  |  18 +-
 .../StreamTaskStateInitializerImplTest.java|   6 +-
 .../runtime/tasks/StreamTaskTerminationTest.java   |   6 +-
 .../runtime/tasks/TestSpyWrapperStateBackend.java  |  10 +-
 .../test/streaming/runtime/StateBackendITCase.java |   6 +-
 39 files changed, 1286 insertions(+), 783 deletions(-)
 copy flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/restore/RocksDBRestoreOperation.java => flink-runtime/src/main/java/org/apache/flink/runtime/state/RestoreOperation.java (82%)
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/AsyncSnapshotStrategySynchronicityBehavior.java} (62%)
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapKeyedStateBackendBuilder.java
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapRestoreOperation.java
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapSnapshotStrategy.java
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/SnapshotStrategySynchronicityBehavior.java} (63%)
 create mode 100644 flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/StateUID.java
 copy flink-runtime/src/{test/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSetTest.java => main/java/org/apache/flink/runtime/state/heap/SyncSnapshotStrategySynchronicityBehavior.java} (54%)
 create mode 100644 flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockKeyedStateBackendBuilder.java
 create mode 100644 flink-runtime/src/test/java/org/apache/flink/runtime/state/ttl/mock/MockRestoreOperation.java



[flink] 02/02: [FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 237d07c76b51c171f0f41f9f82d777df26da1dd4
Author: Yu Li 
AuthorDate: Fri Mar 1 03:02:35 2019 +0800

[FLINK-11730] [State Backends] Make HeapKeyedStateBackend follow the builder pattern

This closes #7866.
---
 .../KVStateRequestSerializerRocksDBTest.java   |   5 +-
 .../network/KvStateRequestSerializerTest.java  |  57 +--
 .../flink/runtime/state/AbstractStateBackend.java  |   9 +
 .../flink/runtime/state/RestoreOperation.java  |  10 +-
 .../runtime/state/filesystem/FsStateBackend.java   |  34 +-
 ...AsyncSnapshotStrategySynchronicityBehavior.java |  26 +-
 .../runtime/state/heap/HeapKeyedStateBackend.java  | 546 ++---
 .../state/heap/HeapKeyedStateBackendBuilder.java   | 152 ++
 .../runtime/state/heap/HeapRestoreOperation.java   | 293 +++
 .../runtime/state/heap/HeapSnapshotStrategy.java   | 271 ++
 .../SnapshotStrategySynchronicityBehavior.java |  24 +-
 .../apache/flink/runtime/state/heap/StateUID.java  |  73 +++
 .../SyncSnapshotStrategySynchronicityBehavior.java |  32 +-
 .../runtime/state/memory/MemoryStateBackend.java   |  34 +-
 .../state/StateBackendMigrationTestBase.java   |   2 -
 .../flink/runtime/state/StateBackendTestBase.java  |  14 +-
 .../state/StateSnapshotCompressionTest.java|  89 ++--
 ...HeapKeyedStateBackendSnapshotMigrationTest.java |  25 +-
 .../state/heap/HeapStateBackendTestBase.java   |  23 +-
 .../runtime/state/ttl/StateBackendTestContext.java |  13 -
 .../flink/runtime/state/ttl/TtlStateTestBase.java  |   3 -
 .../state/ttl/mock/MockKeyedStateBackend.java  |  24 +-
 .../ttl/mock/MockKeyedStateBackendBuilder.java |  85 
 .../state/ttl/mock/MockRestoreOperation.java   |  53 ++
 .../runtime/state/ttl/mock/MockStateBackend.java   |   8 +-
 .../streaming/state/RocksDBStateBackend.java   |  10 -
 .../state/restore/RocksDBRestoreOperation.java |   4 +-
 .../streaming/state/RocksDBStateBackendTest.java   |   5 +-
 28 files changed, 1188 insertions(+), 736 deletions(-)

diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
index 3431199..a5df958 100644
--- a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
+++ b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
@@ -32,6 +32,7 @@ import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
 import org.apache.flink.queryablestate.client.VoidNamespace;
 import org.apache.flink.queryablestate.client.VoidNamespaceSerializer;
 import org.apache.flink.runtime.query.TaskKvStateRegistry;
+import org.apache.flink.runtime.state.AbstractStateBackend;
 import org.apache.flink.runtime.state.KeyGroupRange;
 import org.apache.flink.runtime.state.TestLocalRecoveryConfig;
 import org.apache.flink.runtime.state.internal.InternalListState;
@@ -88,7 +89,7 @@ public final class KVStateRequestSerializerRocksDBTest {
 			TtlTimeProvider.DEFAULT,
 			new UnregisteredMetricsGroup(),
 			Collections.emptyList(),
-			RocksDBStateBackend.getCompressionDecorator(executionConfig),
+			AbstractStateBackend.getCompressionDecorator(executionConfig),
 			new CloseableRegistry()
 		).build();
 		longHeapKeyedStateBackend.setCurrentKey(key);
@@ -132,7 +133,7 @@ public final class KVStateRequestSerializerRocksDBTest {
 			TtlTimeProvider.DEFAULT,
 			new UnregisteredMetricsGroup(),
 			Collections.emptyList(),
-			RocksDBStateBackend.getCompressionDecorator(executionConfig),
+			AbstractStateBackend.getCompressionDecorator(executionConfig),
 			new CloseableRegistry()
 		).build();
 		longHeapKeyedStateBackend.setCurrentKey(key);
diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
index aac3394..2ad202f 100644
--- 

[flink] 01/02: [FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task

2019-03-04 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit eada52be5194a018a41e7ea51ea86e0273df2073
Author: Yu Li 
AuthorDate: Fri Mar 1 11:52:14 2019 +0800

[FLINK-11804] [State Backends] Make sure the CloseableRegistry used in backend builder is registered with task

We need to make sure each stream constructed during restore can also be closed in case of task cancel, for example the data input stream opened for serDe during restore.

Also removed close of CloseableRegistry in RocksDBKeyedStateBackendBuilder.
---
 .../flink/streaming/tests/StubStateBackend.java |  9 +++--
 .../KVStateRequestSerializerRocksDBTest.java|  7 +--
 .../network/KvStateRequestSerializerTest.java   |  9 +
 .../network/KvStateServerHandlerTest.java   |  4 +++-
 .../state/AbstractKeyedStateBackendBuilder.java |  6 +-
 .../flink/runtime/state/AbstractStateBackend.java   |  4 +++-
 .../apache/flink/runtime/state/StateBackend.java| 10 +++---
 .../runtime/state/filesystem/FsStateBackend.java|  7 +--
 .../runtime/state/heap/HeapKeyedStateBackend.java   | 21 +++--
 .../runtime/state/memory/MemoryStateBackend.java|  8 ++--
 .../CheckpointSettingsSerializableTest.java |  5 -
 .../runtime/state/StateSnapshotCompressionTest.java | 13 +
 .../state/heap/HeapStateBackendTestBase.java|  4 +++-
 .../state/ttl/mock/MockKeyedStateBackend.java   |  5 +++--
 .../runtime/state/ttl/mock/MockStateBackend.java|  8 ++--
 .../state/RocksDBKeyedStateBackendBuilder.java  | 21 -
 .../streaming/state/RocksDBStateBackend.java|  7 +--
 .../streaming/state/RocksDBStateBackendTest.java|  9 +
 .../operators/StreamTaskStateInitializerImpl.java   | 18 +++---
 .../StreamTaskStateInitializerImplTest.java |  6 +-
 .../runtime/tasks/StreamTaskTerminationTest.java|  6 +-
 .../runtime/tasks/TestSpyWrapperStateBackend.java   | 10 --
 .../test/streaming/runtime/StateBackendITCase.java  |  6 +-
 23 files changed, 142 insertions(+), 61 deletions(-)

diff --git a/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java b/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
index 17c51d8..dec4f2d 100644
--- a/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
+++ b/flink-end-to-end-tests/flink-stream-state-ttl-test/src/main/java/org/apache/flink/streaming/tests/StubStateBackend.java
@@ -20,6 +20,7 @@ package org.apache.flink.streaming.tests;
 
 import org.apache.flink.api.common.JobID;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.core.fs.CloseableRegistry;
 import org.apache.flink.metrics.MetricGroup;
 import org.apache.flink.runtime.execution.Environment;
 import org.apache.flink.runtime.query.TaskKvStateRegistry;
@@ -32,6 +33,8 @@ import org.apache.flink.runtime.state.OperatorStateBackend;
 import org.apache.flink.runtime.state.StateBackend;
 import org.apache.flink.runtime.state.ttl.TtlTimeProvider;
 
+import javax.annotation.Nonnull;
+
 import java.io.IOException;
 import java.util.Collection;
 
@@ -75,7 +78,8 @@ final class StubStateBackend implements StateBackend {
 			TaskKvStateRegistry kvStateRegistry,
 			TtlTimeProvider ttlTimeProvider,
 			MetricGroup metricGroup,
-			Collection<KeyedStateHandle> stateHandles) throws Exception {
+			@Nonnull Collection<KeyedStateHandle> stateHandles,
+			CloseableRegistry cancelStreamRegistry) throws Exception {
 
 		return backend.createKeyedStateBackend(
 			env,
@@ -87,7 +91,8 @@ final class StubStateBackend implements StateBackend {
 			kvStateRegistry,
 			this.ttlTimeProvider,
 			metricGroup,
-			stateHandles);
+			stateHandles,
+			cancelStreamRegistry);
 	}
 
@Override
diff --git a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
index de86340..3431199 100644
--- a/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
+++ b/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
@@ -27,6 +27,7 

[flink] branch release-1.8 updated (3d6e713 -> 8a5c972)

2019-03-04 Thread dwysakowicz
This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a change to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 3d6e713  Update ops/upgrading.md for Flink 1.8
 new 4679a46  [hotfix][core] Fix Tuple0Serializer handling of null
 new 7d222a6  [hotfix][core] Added snapshot for TestListCompositeSerializer
 new 975a25c  [hotfix][core] Added snapshot for NothingSerializerSnapshot
 new 061981e  [hotfix][core] Comparing whole collections rather than contents in KryoGenericTypeSerializerTest
 new 5fe62dc  [hotfix][tests] Call all methods from SerializerTestBase in SerializerTestInstance by reflection
 new 7fad886  [FLINK-11420][core] Fixed duplicate method of TraversableSerializer
 new 8a5c972  [hotfix][tests] Added matcher that performs deep comparison with taking Tuples into account

The 16014 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/typeutils/runtime/Tuple0Serializer.java   |   2 +
 .../common/typeutils/CompositeSerializerTest.java  |  63 +++-
 .../api/common/typeutils/SerializerTestBase.java   |  82 --
 .../common/typeutils/SerializerTestInstance.java   |  55 +--
 .../java/typeutils/runtime/RowSerializerTest.java  |  14 +-
 .../typeutils/runtime/TupleSerializerTest.java |   3 +-
 .../runtime/TupleSerializerTestInstance.java   |  79 --
 .../flink/testutils/CustomEqualityMatcher.java |  70 +
 .../flink/testutils/DeeplyEqualsChecker.java   | 175 +
 .../valuearray/ByteValueArraySerializerTest.java   |   3 +-
 .../valuearray/CharValueArraySerializerTest.java   |   3 +-
 .../valuearray/DoubleValueArraySerializerTest.java |   3 +-
 .../valuearray/FloatValueArraySerializerTest.java  |   3 +-
 .../valuearray/IntValueArraySerializerTest.java|   3 +-
 .../valuearray/LongValueArraySerializerTest.java   |   3 +-
 .../valuearray/NullValueArraySerializerTest.java   |   3 +-
 .../valuearray/ShortValueArraySerializerTest.java  |   3 +-
 .../valuearray/StringValueArraySerializerTest.java |   3 +-
 .../valuearray/ValueArraySerializerTestBase.java   |  38 ++---
 .../api/scala/typeutils/NothingSerializer.scala|  14 +-
 .../scala/typeutils/TraversableSerializer.scala|   2 +-
 .../runtime/KryoGenericTypeSerializerTest.scala|   8 +-
 .../runtime/ScalaSpecialTypesSerializerTest.scala  |  83 +++---
 .../scala/runtime/TraversableSerializerTest.scala  |  17 --
 .../runtime/TupleSerializerTestInstance.scala  |  81 +-
 25 files changed, 518 insertions(+), 295 deletions(-)
 delete mode 100644 flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerTestInstance.java
 create mode 100644 flink-core/src/test/java/org/apache/flink/testutils/CustomEqualityMatcher.java
 create mode 100644 flink-core/src/test/java/org/apache/flink/testutils/DeeplyEqualsChecker.java
 copy flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/StringSerializerTest.java => flink-libraries/flink-gelly/src/test/java/org/apache/flink/graph/types/valuearray/ValueArraySerializerTestBase.java (56%)



[flink] branch master updated (c940c19 -> 39e1384)

2019-03-04 Thread dwysakowicz
This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from c940c19  Update ops/upgrading.md for Flink 1.8
 new 04c21dd  [hotfix][core] Fix Tuple0Serializer handling of null
 new 64e6237  [hotfix][core] Added snapshot for TestListCompositeSerializer
 new e84d7a8  [hotfix][core] Added snapshot for NothingSerializerSnapshot
 new f28b831  [hotfix][core] Comparing whole collections rather than contents in KryoGenericTypeSerializerTest
 new cf49c73  [hotfix][tests] Call all methods from SerializerTestBase in SerializerTestInstance by reflection
 new 00e59e7  [FLINK-11420][core] Fixed duplicate method of TraversableSerializer
 new 39e1384  [hotfix][tests] Added matcher that performs deep comparison with taking Tuples into account

The 16046 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/typeutils/runtime/Tuple0Serializer.java   |   2 +
 .../common/typeutils/CompositeSerializerTest.java  |  63 +++-
 .../api/common/typeutils/SerializerTestBase.java   |  82 --
 .../common/typeutils/SerializerTestInstance.java   |  55 +--
 .../java/typeutils/runtime/RowSerializerTest.java  |  14 +-
 .../typeutils/runtime/TupleSerializerTest.java |   3 +-
 .../runtime/TupleSerializerTestInstance.java   |  79 --
 .../flink/testutils/CustomEqualityMatcher.java |  70 +
 .../flink/testutils/DeeplyEqualsChecker.java   | 175 +
 .../valuearray/ByteValueArraySerializerTest.java   |   3 +-
 .../valuearray/CharValueArraySerializerTest.java   |   3 +-
 .../valuearray/DoubleValueArraySerializerTest.java |   3 +-
 .../valuearray/FloatValueArraySerializerTest.java  |   3 +-
 .../valuearray/IntValueArraySerializerTest.java|   3 +-
 .../valuearray/LongValueArraySerializerTest.java   |   3 +-
 .../valuearray/NullValueArraySerializerTest.java   |   3 +-
 .../valuearray/ShortValueArraySerializerTest.java  |   3 +-
 .../valuearray/StringValueArraySerializerTest.java |   3 +-
 .../valuearray/ValueArraySerializerTestBase.java   |  38 ++---
 .../api/scala/typeutils/NothingSerializer.scala|  14 +-
 .../scala/typeutils/TraversableSerializer.scala|   2 +-
 .../runtime/KryoGenericTypeSerializerTest.scala|   8 +-
 .../runtime/ScalaSpecialTypesSerializerTest.scala  |  83 +++---
 .../scala/runtime/TraversableSerializerTest.scala  |  17 --
 .../runtime/TupleSerializerTestInstance.scala  |  81 +-
 .../table/typeutils/BaseRowSerializerTest.java |  38 ++---
 26 files changed, 538 insertions(+), 313 deletions(-)
 delete mode 100644 flink-core/src/test/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerTestInstance.java
 create mode 100644 flink-core/src/test/java/org/apache/flink/testutils/CustomEqualityMatcher.java
 create mode 100644 flink-core/src/test/java/org/apache/flink/testutils/DeeplyEqualsChecker.java
 copy flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/StringSerializerTest.java => flink-libraries/flink-gelly/src/test/java/org/apache/flink/graph/types/valuearray/ValueArraySerializerTestBase.java (56%)



[flink] branch release-1.7 updated: [FLINK-9003][E2E] Add operators with input type that goes through custom, stateful serialization

2019-03-04 Thread kkloudas
This is an automated email from the ASF dual-hosted git repository.

kkloudas pushed a commit to branch release-1.7
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.7 by this push:
 new e3c0e69  [FLINK-9003][E2E] Add operators with input type that goes through custom, stateful serialization
e3c0e69 is described below

commit e3c0e69ce8920df15c1a85788891ff0e645e207b
Author: Andrey Zagrebin 
AuthorDate: Tue Feb 26 15:49:42 2019 +0100

[FLINK-9003][E2E] Add operators with input type that goes through custom, stateful serialization

This closes #7890.
---
 .../flink/api/common/typeutils/TypeSerializer.java |   5 +-
 .../tests/DataStreamAllroundTestJobFactory.java|  69 +++-
 .../tests/DataStreamAllroundTestProgram.java   |  76 +---
 .../streaming/tests/SemanticsCheckMapper.java  |   2 +-
 .../streaming/tests/SequenceGeneratorSource.java   |   7 +-
 .../tests/SingleThreadAccessCheckingTypeInfo.java  | 100 +++
 .../SingleThreadAccessCheckingTypeSerializer.java  | 195 +
 .../flink/streaming/tests/TestOperatorEnum.java|  53 ++
 .../builder/ArtificialStateBuilder.java|   4 +-
 9 files changed, 468 insertions(+), 43 deletions(-)

diff --git a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializer.java b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializer.java
index ddb0b87..5e9498e 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializer.java
@@ -29,9 +29,8 @@ import java.io.Serializable;
  * This interface describes the methods that are required for a data type to be handled by the Flink
  * runtime. Specifically, this interface contains the serialization and copying methods.
  *
- * The methods in this class are assumed to be stateless, such that it is effectively thread safe. Stateful
- * implementations of the methods may lead to unpredictable side effects and will compromise both stability and
- * correctness of the program.
+ * The methods in this class are not necessarily thread safe. To avoid unpredictable side effects,
+ * it is recommended to call {@code duplicate()} method and use one serializer instance per thread.
  *
  * Upgrading TypeSerializers to the new TypeSerializerSnapshot model
  *
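
A minimal sketch of the per-thread usage recommended above; the wrapper class
is an assumption for illustration, and only TypeSerializer#duplicate() comes
from Flink.

    import org.apache.flink.api.common.typeutils.TypeSerializer;

    final class PerThreadSerializer<T> {

        private final ThreadLocal<TypeSerializer<T>> local;

        PerThreadSerializer(TypeSerializer<T> prototype) {
            // duplicate() returns an independent instance for stateful
            // serializers, so each thread serializes on its own copy.
            this.local = ThreadLocal.withInitial(prototype::duplicate);
        }

        TypeSerializer<T> get() {
            return local.get();
        }
    }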
diff --git a/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/DataStreamAllroundTestJobFactory.java b/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/DataStreamAllroundTestJobFactory.java
index 4f69cdb..af39522 100644
--- a/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/DataStreamAllroundTestJobFactory.java
+++ b/flink-end-to-end-tests/flink-datastream-allround-test/src/main/java/org/apache/flink/streaming/tests/DataStreamAllroundTestJobFactory.java
@@ -24,15 +24,20 @@ import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
 import org.apache.flink.api.common.state.ListStateDescriptor;
 import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.java.functions.KeySelector;
 import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.utils.ParameterTool;
 import org.apache.flink.configuration.ConfigOption;
 import org.apache.flink.configuration.ConfigOptions;
 import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
+import org.apache.flink.runtime.state.StateBackend;
 import org.apache.flink.runtime.state.filesystem.FsStateBackend;
 import org.apache.flink.streaming.api.CheckpointingMode;
 import org.apache.flink.streaming.api.TimeCharacteristic;
+import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.KeyedStream;
 import org.apache.flink.streaming.api.datastream.WindowedStream;
 import org.apache.flink.streaming.api.environment.CheckpointConfig;
@@ -51,6 +56,10 @@ import org.apache.flink.streaming.tests.artificialstate.builder.ArtificialValueS
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.flink.streaming.tests.TestOperatorEnum.EVENT_IDENTITY_MAPPER;
+import static org.apache.flink.streaming.tests.TestOperatorEnum.MAPPER_RETURNS_OUT_WITH_CUSTOM_SER;
+import static org.apache.flink.streaming.tests.TestOperatorEnum.RESULT_TYPE_QUERYABLE_MAPPER_WITH_CUSTOM_SER;
+
 /**
  * A factory for components of general purpose test jobs for Flink's DataStream API operators and primitives.
  *
@@ -268,13 +277,13 @@ public 

[flink-web] branch asf-site updated: [FLINK-11565][docs] Translate the "Improve the Website" page into Chinese

2019-03-04 Thread jark
This is an automated email from the ASF dual-hosted git repository.

jark pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 4e13ba1  [FLINK-11565][docs] Translate the "Improve the Website" page into Chinese
4e13ba1 is described below

commit 4e13ba149feed3ac2dd5fc5e7964130c0dbc07ff
Author: klion26 
AuthorDate: Tue Feb 26 22:53:04 2019 +0800

[FLINK-11565][docs] Translate the "Improve the Website" page into Chinese

This closes #177
---
 content/zh/improve-website.html | 92 -
 improve-website.zh.md   | 80 +--
 2 files changed, 83 insertions(+), 89 deletions(-)

diff --git a/content/zh/improve-website.html b/content/zh/improve-website.html
index 4da8066..61a5845 100644
--- a/content/zh/improve-website.html
+++ b/content/zh/improve-website.html
@@ -152,119 +152,115 @@
   
 改进网站
 
-   The http://flink.apache.org;>Apache Flink website 
presents Apache Flink and its community. It serves several purposes 
including:
+   http://flink.apache.org;>Apache Flink 官网 介绍了 Apache 
Flink 及其社区。包括如下多种用途:
 
 
-  Informing visitors about Apache Flink and its features.
-  Encouraging visitors to download and use Flink.
-  Encouraging visitors to engage with the community.
+  向来访者介绍 Apache Flink 及其特性。
+  鼓励来访者下载并使用 Flink。
+  鼓励来访者与社区进行互动。
 
 
-We welcome any contribution to improve our website. This document contains 
all information that is necessary to improve Flink’s website.
+我们欢迎任何改进官网的贡献。本文档包含了所有改进 Flink 官网所需要的信息。
 
 
 
-  Obtain the website sources
-  Directory structure and 
files
-  Update or extend the 
documentation
-  Submit your contribution
-  Committer section
+  获取官网源码
+  目录结构和文件
+  更新文档
+  提交你的贡献
+  Committer 章节
 
 
 
 
-Obtain the website sources
+获取官网源码
 
-The website of Apache Flink is hosted in a dedicated http://git-scm.com/;>git repository which is mirrored to GitHub at https://github.com/apache/flink-web;>https://github.com/apache/flink-web.
+Apache Flink 官网的源码托管在专用的 http://git-scm.com/;>git 仓库中,并在 
Github 中有一个镜像 https://github.com/apache/flink-web;>https://github.com/apache/flink-web。
 
-The easiest way to contribute website updates is to fork https://github.com/apache/flink-web;>the mirrored website repository on 
GitHub into your own GitHub account by clicking on the fork button at the 
top right. If you have no GitHub account, you can create one for free.
+向官网贡献的最简单方式是通过单击右上角的 fork 按钮,将 [Github 上官网的镜像] 
(https://github.com/apache/flink-web) 镜像到自己的仓库中。如果没有 Github 帐户,你可以免费创建一个。
 
-Next, clone your fork to your local machine.
+接下来,把你镜像的仓库克隆到本地机器上。
 
 git clone 
https://github.com/your-user-name/flink-web.git
 
 
-The flink-web directory contains the cloned repository. The 
website resides in the asf-site branch of the repository. Run the 
following commands to enter the directory and switch to the 
asf-site branch.
+flink-web 目录包含了拷贝的仓库。官网的代码位于 asf-site 
分支上。运行如下命令切换到 asf-site 分支
 
 cd flink-web
 git checkout asf-site
 
 
-Directory structure and files
+目录结构和文件
 
-Flink’s website is written in http://daringfireball.net/projects/markdown/;>Markdown. Markdown is a 
lightweight markup language which can be translated to HTML. We use http://jekyllrb.com/;>Jekyll to generate static HTML files from 
Markdown.
+Flink 的官网是用 http://daringfireball.net/projects/markdown/;>Markdown 所编写。Markdown 
是一种轻量级标记语言,可以翻译为 HTML。我们使用 http://jekyllrb.com/;>Jekyll 生成静态 HTML 
文件。
 
-The files and directories in the website git repository have the following 
roles:
+官网仓库中的目录和文件组织如下:
 
 
-  All files ending with .md are Markdown files. These files 
are translated into static HTML files.
-  Regular directories (not starting with an underscore (_)) 
contain also .md files. The directory structure is reflected in 
the generated HTML files and the published website.
-  The _posts directory contains blog posts. Each blog post is 
written as one Markdown file. To contribute a post, add a new file there.
-  The _includes/ directory contains includeable files such as 
the navigation bar or the footer.
-  The docs/ directory contains copies of the documentation of 
Flink for different releases. There is a directory inside docs/ 
for each stable release and the latest SNAPSHOT version. The build script is 
taking care of the maintenance of this directory.
-  The content/ directory contains the generated HTML files 
from Jekyll. It is important to place the files in this directory since the 
Apache Infrastructure to host the Flink website is pulling the HTML content 
from his directory. (For committers: When pushing changes to the website git, 
push also the updates in the content/ directory!)
+  所有文件以 .md 结尾。这些文件会被转换为静态 HTML 文件。
+  常规目录(不以下划线(_)开头)也包含以 .md 结尾的文件。目录结构最终会反映在生成的 
HTML 文件和发布的网站上。
+  _posts 目录下包含了所有的博客文章。每个 Markdown 
文件是一篇博客。贡献新的博客,请添加一个新的文件。
+  _includes/ 目录包含可导入的文件,例如导航栏或页脚。
+  docs/ 目录包含不同版本的 

[flink] branch release-1.8 updated: Update ops/upgrading.md for Flink 1.8

2019-03-04 Thread aljoscha
This is an automated email from the ASF dual-hosted git repository.

aljoscha pushed a commit to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.8 by this push:
 new 3d6e713  Update ops/upgrading.md for Flink 1.8
3d6e713 is described below

commit 3d6e7135ed63a967bb17b1463f243ffda966f229
Author: Aljoscha Krettek 
AuthorDate: Mon Mar 4 16:09:48 2019 +0100

Update ops/upgrading.md for Flink 1.8
---
 docs/ops/upgrading.md | 24 
 1 file changed, 24 insertions(+)

diff --git a/docs/ops/upgrading.md b/docs/ops/upgrading.md
index 22cea4c..0955127 100644
--- a/docs/ops/upgrading.md
+++ b/docs/ops/upgrading.md
@@ -213,6 +213,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   1.5.x
   1.6.x
   1.7.x
+  1.8.x
   Limitations
 
   
@@ -226,6 +227,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   
   
+  
   The maximum parallelism of a job that was migrated from Flink 1.1.x to 1.2.x+ is
   currently fixed as the parallelism of the job. This means that the parallelism can not be increased after
   migration. This limitation might be removed in a future bugfix release.
@@ -239,6 +241,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   When migrating from Flink 1.2.x to Flink 1.3.x+, changing parallelism at the same
   time is not supported. Users have to first take a savepoint after migrating to Flink 1.3.x+, and then change
   parallelism. Savepoints created for CEP applications cannot be restored in 1.4.x+.
@@ -252,6 +255,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   Migrating from Flink 1.3.0 to Flink 1.4.[0,1] will fail if the savepoint contains Scala case classes. Users have to directly migrate to 1.4.2+ instead.
 
 
@@ -263,6 +267,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   
 
 
@@ -274,6 +279,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   There is a known issue with resuming broadcast state created with 1.5.x in versions
   1.6.x up to 1.6.2, and 1.7.0: <a href="https://issues.apache.org/jira/browse/FLINK-11087">FLINK-11087</a>. Users
   upgrading to 1.6.x or 1.7.x series need to directly migrate to minor versions higher than 1.6.2 and 1.7.0, respectively.
@@ -288,6 +294,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   O
   O
+  O
   
 
 
@@ -299,8 +306,25 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   
   O
+  O
   
 
+
+  1.8.x
+  
+  
+  
+  
+  
+  
+  
+  O
+  Savepoints from Flink 1.2 that contain a Scala
+  TraversableSerializer are not compatible with Flink 1.8 anymore
+  because of an update in this serializer. You can get around this
+  restriction by first upgrading to a version between Flink 1.3 and
+  Flink 1.7 and then updating to Flink 1.8.
+
   
 
 



[flink] branch master updated: Update ops/upgrading.md for Flink 1.8

2019-03-04 Thread aljoscha
This is an automated email from the ASF dual-hosted git repository.

aljoscha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c940c19  Update ops/upgrading.md for Flink 1.8
c940c19 is described below

commit c940c195e598293084675d9df1eb631018ef
Author: Aljoscha Krettek 
AuthorDate: Mon Mar 4 16:09:48 2019 +0100

Update ops/upgrading.md for Flink 1.8
---
 docs/ops/upgrading.md | 24 
 1 file changed, 24 insertions(+)

diff --git a/docs/ops/upgrading.md b/docs/ops/upgrading.md
index 42c1835..1fc0502 100644
--- a/docs/ops/upgrading.md
+++ b/docs/ops/upgrading.md
@@ -213,6 +213,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   1.5.x
   1.6.x
   1.7.x
+  1.8.x
   Limitations
 
   
@@ -226,6 +227,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   
   
+  
   The maximum parallelism of a job that was migrated from Flink 1.1.x to 1.2.x+ is
   currently fixed as the parallelism of the job. This means that the parallelism can not be increased after
   migration. This limitation might be removed in a future bugfix release.
@@ -239,6 +241,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   When migrating from Flink 1.2.x to Flink 1.3.x+, changing parallelism at the same
   time is not supported. Users have to first take a savepoint after migrating to Flink 1.3.x+, and then change
   parallelism. Savepoints created for CEP applications cannot be restored in 1.4.x+.
@@ -252,6 +255,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   Migrating from Flink 1.3.0 to Flink 1.4.[0,1] will fail if the savepoint contains Scala case classes. Users have to directly migrate to 1.4.2+ instead.
 
 
@@ -263,6 +267,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   
 
 
@@ -274,6 +279,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   O
   O
   O
+  O
   There is a known issue with resuming broadcast state created with 1.5.x in versions
   1.6.x up to 1.6.2, and 1.7.0: <a href="https://issues.apache.org/jira/browse/FLINK-11087">FLINK-11087</a>. Users
   upgrading to 1.6.x or 1.7.x series need to directly migrate to minor versions higher than 1.6.2 and 1.7.0, respectively.
@@ -288,6 +294,7 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   O
   O
+  O
   
 
 
@@ -299,8 +306,25 @@ Savepoints are compatible across Flink versions as indicated by the table below:
   
   
   O
+  O
   
 
+
+  1.8.x
+  
+  
+  
+  
+  
+  
+  
+  O
+  Savepoints from Flink 1.2 that contain a Scala
+  TraversableSerializer are not compatible with Flink 1.8 anymore
+  because of an update in this serializer. You can get around this
+  restriction by first upgrading to a version between Flink 1.3 and
+  Flink 1.7 and then updating to Flink 1.8.
+
   
 
 



[flink] branch master updated: [FLINK-11653][DataStream] Add configuration to enforce custom UIDs on datastream

2019-03-04 Thread dwysakowicz
This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 9c32948  [FLINK-11653][DataStream] Add configuration to enforce custom UIDs on datastream
9c32948 is described below

commit 9c329484f9e8da58b3f0ac4e48348ba3e781dd08
Author: Seth Wiesman 
AuthorDate: Mon Mar 4 08:27:08 2019 -0600

[FLINK-11653][DataStream] Add configuration to enforce custom UIDs on datastream
---
 docs/ops/upgrading.md  |  2 +-
 .../apache/flink/api/common/ExecutionConfig.java   | 37 ++
 .../streaming/api/graph/StreamGraphGenerator.java  |  7 
 .../StreamingJobGraphGeneratorNodeHashTest.java| 34 
 4 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/docs/ops/upgrading.md b/docs/ops/upgrading.md
index 22cea4c..42c1835 100644
--- a/docs/ops/upgrading.md
+++ b/docs/ops/upgrading.md
@@ -75,7 +75,7 @@ val mappedEvents: DataStream[(Int, Long)] = events
 
**Important:** As of 1.3.x this also applies to operators that are part of a chain.
 
-By default all state stored in a savepoint must be matched to the operators of a starting application. However, users can explicitly agree to skip (and thereby discard) state that cannot be matched to an operator when starting an application from a savepoint. Stateful operators for which no state is found in the savepoint are initialized with their default state.
+By default all state stored in a savepoint must be matched to the operators of a starting application. However, users can explicitly agree to skip (and thereby discard) state that cannot be matched to an operator when starting an application from a savepoint. Stateful operators for which no state is found in the savepoint are initialized with their default state. Users may enforce best practices by calling `ExecutionConfig#disableAutoGeneratedUIDs`, which will fail the job submission if an [...]
 
 ### Stateful Operators and User Functions
 
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
index d288810..0baf8fd 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java
@@ -116,6 +116,8 @@ public class ExecutionConfig implements Serializable, Archiveable<ArchivedExecutionConfig>
[...]
+* <p>It is highly recommended that users specify UIDs before deploying to
+* production since they are used to match state in savepoints to operators
+* in a job. Because auto-generated IDs are likely to change when modifying
+* a job, specifying custom IDs allows an application to evolve over time
+* without discarding state.
+*/
+   public void disableAutoGeneratedUIDs() {
+   enableAutoGeneratedUids = false;
+   }
+
+   /**
+* Checks whether auto-generated UIDs are enabled.
+*
+* <p>Auto-generated UIDs are enabled by default.
+*
+* @see #enableAutoGeneratedUIDs()
+* @see #disableAutoGeneratedUIDs()
+*/
+   public boolean hasAutoGeneratedUIDsEnabled() {
+   return enableAutoGeneratedUids;
+   }
+
+   /**
 * Forces Flink to use the Apache Avro serializer for POJOs.
 *
 * Important: Make sure to include the flink-avro module.
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
index d2841fd..3fd43dc 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamGraphGenerator.java
@@ -204,6 +204,13 @@ public class StreamGraphGenerator {

		streamGraph.setTransformationUserHash(transform.getId(), transform.getUserProvidedNodeHash());
	}
 
+	if (!streamGraph.getExecutionConfig().hasAutoGeneratedUIDsEnabled()) {
+		if (transform.getUserProvidedNodeHash() == null && transform.getUid() == null) {
+			throw new IllegalStateException("Auto generated UIDs have been disabled " +
+				"but no UID or hash has been assigned to operator " + transform.getName());
+		}
+	}
+
	if (transform.getMinResources() != null && transform.getPreferredResources() != null) {
		streamGraph.setResources(transform.getId(), transform.getMinResources(), transform.getPreferredResources());
	}
diff --git 
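
To make the new switch concrete, a minimal sketch of a job that satisfies the check added in StreamGraphGenerator above; the toy pipeline and UID strings are invented for illustration, not taken from the commit. With auto-generated UIDs disabled, every transformation, sources and sinks included, needs an explicit uid() (or a user-provided hash), otherwise submission fails with the IllegalStateException shown above:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class ExplicitUidJob {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            env.getConfig().disableAutoGeneratedUIDs();

            env.fromElements(1, 2, 3)
                .uid("source-numbers")   // explicit UID for the source
                .map(i -> i * 2)
                .uid("double-numbers")   // explicit UID for the map
                .print()
                .uid("print-sink");      // explicit UID for the sink

            env.execute("explicit-uid-job");
        }
    }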

[flink] branch master updated: [hotfix] Fix typo in some API comments (#7867)

2019-03-04 Thread kurt
This is an automated email from the ASF dual-hosted git repository.

kurt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c6f3dbc  [hotfix] Fix typo in some API comments (#7867)
c6f3dbc is described below

commit c6f3dbc3c85a438e6de04109d730f1cd849acf4f
Author: Benchao Li 
AuthorDate: Mon Mar 4 21:54:22 2019 +0800

[hotfix] Fix typo in some API comments (#7867)

* Fix typos in DataStream
* merge other fixes into one PR
---
 .../apache/flink/streaming/api/datastream/DataStream.java  | 14 +++---
 .../flink/streaming/api/datastream/JoinedStreams.java  |  2 +-
 .../flink/streaming/api/datastream/WindowedStream.java |  2 +-
 .../api/transformations/OneInputTransformation.java|  2 +-
 .../api/transformations/TwoInputTransformation.java|  4 ++--
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
index a0af5f1..b33ed2f 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/DataStream.java
@@ -234,7 +234,7 @@ public class DataStream<T> {
 *	{@link org.apache.flink.streaming.api.collector.selector.OutputSelector}
 *	for directing the tuples.
 * @return The {@link SplitStream}
-* @deprecated Please use side ouput instead.
+* @deprecated Please use side output instead.
 */
	@Deprecated
	public SplitStream<T> split(OutputSelector<T> outputSelector) {
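
Since the deprecation note above only names the replacement, a short hedged sketch of the side output API it refers to; the tag name and the toy pipeline are invented for illustration:

    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.functions.ProcessFunction;
    import org.apache.flink.util.Collector;
    import org.apache.flink.util.OutputTag;

    public class SideOutputExample {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // anonymous subclass so the element type survives erasure
            final OutputTag<Integer> oddTag = new OutputTag<Integer>("odd") {};

            SingleOutputStreamOperator<Integer> evens = env.fromElements(1, 2, 3, 4)
                .process(new ProcessFunction<Integer, Integer>() {
                    @Override
                    public void processElement(Integer value, Context ctx, Collector<Integer> out) {
                        if (value % 2 == 0) {
                            out.collect(value);        // main output
                        } else {
                            ctx.output(oddTag, value); // side output
                        }
                    }
                });

            DataStream<Integer> odds = evens.getSideOutput(oddTag);
            odds.print();
            evens.print();

            env.execute("side-output-example");
        }
    }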
@@ -349,7 +349,7 @@ public class DataStream<T> {
 * Note: This method works only on single field keys.
 *
 * @param partitioner The partitioner to assign partitions to keys.
-* @param field The field index on which the DataStream is to partitioned.
+* @param field The field index on which the DataStream is partitioned.
 * @return The partitioned DataStream.
 */
	public <K> DataStream<T> partitionCustom(Partitioner<K> partitioner, int field) {
@@ -364,7 +364,7 @@ public class DataStream<T> {
 * Note: This method works only on single field keys.
 *
 * @param partitioner The partitioner to assign partitions to keys.
-* @param field The expression for the field on which the DataStream is to partitioned.
+* @param field The expression for the field on which the DataStream is partitioned.
 * @return The partitioned DataStream.
 */
	public <K> DataStream<T> partitionCustom(Partitioner<K> partitioner, String field) {
@@ -405,7 +405,7 @@ public class DataStream<T> {
 
	/**
	 * Sets the partitioning of the {@link DataStream} so that the output elements
-	 * are broadcast to every parallel instance of the next operation.
+	 * are broadcasted to every parallel instance of the next operation.
	 *
	 * @return The DataStream with broadcast partitioning set.
	 */
@@ -797,10 +797,10 @@ public class DataStream<T> {
	}
 
	/**
-	 * Windows this data stream to a {@code KeyedTriggerWindowDataStream}, which evaluates windows
-	 * over a key grouped stream. Elements are put into windows by a
+	 * Windows this data stream to a {@code AllWindowedStream}, which evaluates windows
+	 * over a non key grouped stream. Elements are put into windows by a
	 * {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}. The grouping of
-	 * elements is done both by key and by window.
+	 * elements is done by window.
	 *
	 * <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to specify
	 * when windows are evaluated. However, {@code WindowAssigners} have a default {@code Trigger}
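
As a hedged illustration of the corrected javadoc, a minimal non-keyed windowing job; the pipeline itself is invented, only windowAll(), the window assigner, and reduce() are the APIs the comment describes:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
    import org.apache.flink.streaming.api.windowing.time.Time;

    public class WindowAllExample {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            env.fromElements(1, 2, 3, 4, 5)
                // windowAll produces an AllWindowedStream: elements are grouped
                // by window only, not by key, so this stage runs non-parallel
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .reduce((a, b) -> a + b)
                .print();

            env.execute("window-all-example");
        }
    }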
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
index 4c327bf..441b97b 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/JoinedStreams.java
@@ -50,7 +50,7 @@ import static java.util.Objects.requireNonNull;
  * Example:
 * <pre>{@code
 * DataStream<Tuple2<String, Integer>> one = ...;
- * DataStream<Tuple2<String, Integer>> twp = ...;
+ * DataStream<Tuple2<String, Integer>> two = ...;
 *
 * DataStream<T> result = one.join(two)
 *     .where(new MyFirstKeySelector())
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/datastream/WindowedStream.java
 

[flink] branch master updated: [FLINK-11802][table-runtime-blink] Create TypeInfo and TypeSerializer for blink data format

2019-03-04 Thread kurt
This is an automated email from the ASF dual-hosted git repository.

kurt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 76eb039  [FLINK-11802][table-runtime-blink] Create TypeInfo and 
TypeSerializer for blink data format
76eb039 is described below

commit 76eb0397041915c867df57b7e2478caca3afd7bd
Author: JingsongLi 
AuthorDate: Mon Mar 4 10:01:58 2019 +0800

[FLINK-11802][table-runtime-blink] Create TypeInfo and TypeSerializer for 
blink data format

This closes #7887
---
 flink-table/flink-table-runtime-blink/pom.xml  |  17 ++
 .../apache/flink/table/dataformat/BinaryArray.java |  10 +
 .../apache/flink/table/dataformat/BinaryMap.java   |  10 +
 .../apache/flink/table/dataformat/BinaryRow.java   |   8 +-
 .../flink/table/dataformat/BinaryString.java   |  15 ++
 .../flink/table/dataformat/BinaryWriter.java   |  39 +++
 .../flink/table/dataformat/TypeGetterSetters.java  |  39 +++
 .../org/apache/flink/table/type/ArrayType.java |   0
 .../org/apache/flink/table/type/AtomicType.java|   0
 .../org/apache/flink/table/type/BooleanType.java   |   0
 .../java/org/apache/flink/table/type/ByteType.java |   0
 .../java/org/apache/flink/table/type/CharType.java |   0
 .../java/org/apache/flink/table/type/DateType.java |   0
 .../org/apache/flink/table/type/DecimalType.java   |   0
 .../org/apache/flink/table/type/DoubleType.java|   0
 .../org/apache/flink/table/type/FloatType.java |   0
 .../org/apache/flink/table/type/GenericType.java   |   0
 .../java/org/apache/flink/table/type/IntType.java  |   0
 .../org/apache/flink/table/type/InternalType.java  |   0
 .../org/apache/flink/table/type/InternalTypes.java |   0
 .../java/org/apache/flink/table/type/LongType.java |   0
 .../java/org/apache/flink/table/type/MapType.java  |   0
 .../org/apache/flink/table/type/PrimitiveType.java |   0
 .../java/org/apache/flink/table/type/RowType.java  |   2 +-
 .../org/apache/flink/table/type/ShortType.java |   0
 .../org/apache/flink/table/type/StringType.java|   0
 .../java/org/apache/flink/table/type/TimeType.java |   0
 .../org/apache/flink/table/type/TimestampType.java |   0
 .../apache/flink/table/type/TypeConverters.java|  60 -
 .../flink/table/typeutils/BaseRowSerializer.java   | 282 +
 .../flink/table/typeutils/BaseRowTypeInfo.java | 145 +++
 .../table/typeutils/BinaryArraySerializer.java | 107 
 .../flink/table/typeutils/BinaryArrayTypeInfo.java | 105 
 .../flink/table/typeutils/BinaryMapSerializer.java | 107 
 .../flink/table/typeutils/BinaryMapTypeInfo.java   | 113 +
 .../flink/table/typeutils/BinaryRowSerializer.java | 184 ++
 .../table/typeutils/BinaryStringSerializer.java| 114 +
 .../table/typeutils/BinaryStringTypeInfo.java  |  89 +++
 .../org/apache/flink/table/util/SegmentsUtil.java  |  40 +++
 .../apache/flink/table/type/InternalTypeTest.java  |   0
 .../table/typeutils/BaseRowSerializerTest.java | 229 +
 .../table/typeutils/BinaryArraySerializerTest.java |  66 +
 .../table/typeutils/BinaryMapSerializerTest.java   |  66 +
 .../table/typeutils/BinaryRowSerializerTest.java   |  64 +
 .../table/typeutils/BinaryRowTypeInfoTest.java |  77 ++
 .../typeutils/BinaryStringSerializerTest.java} |  45 ++--
 .../flink/table/typeutils/BinaryTypeInfoTest.java} |  35 ++-
 47 files changed, 2017 insertions(+), 51 deletions(-)
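
The commit itself only lists the new files, so for orientation: a TypeInformation describes a type and creates the TypeSerializer that reads and writes it, and that is the contract the new BaseRow/Binary* classes implement. A minimal round-trip sketch using Flink's stable core types (String stands in for the blink data formats, whose APIs are not shown in this mail):

    import org.apache.flink.api.common.ExecutionConfig;
    import org.apache.flink.api.common.typeinfo.TypeInformation;
    import org.apache.flink.api.common.typeutils.TypeSerializer;
    import org.apache.flink.core.memory.DataInputDeserializer;
    import org.apache.flink.core.memory.DataOutputSerializer;

    public class SerializerRoundTrip {
        public static void main(String[] args) throws Exception {
            // TypeInformation -> TypeSerializer is the pairing the new
            // Binary*TypeInfo / Binary*Serializer classes follow.
            TypeInformation<String> info = TypeInformation.of(String.class);
            TypeSerializer<String> serializer = info.createSerializer(new ExecutionConfig());

            DataOutputSerializer out = new DataOutputSerializer(64);
            serializer.serialize("hello blink", out);

            DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
            System.out.println(serializer.deserialize(in)); // prints: hello blink
        }
    }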

diff --git a/flink-table/flink-table-runtime-blink/pom.xml b/flink-table/flink-table-runtime-blink/pom.xml
index 29fb8a7..e8b1623 100644
--- a/flink-table/flink-table-runtime-blink/pom.xml
+++ b/flink-table/flink-table-runtime-blink/pom.xml
@@ -60,5 +60,22 @@ under the License.
			<artifactId>janino</artifactId>
			<version>${janino.version}</version>
		</dependency>
+
+		<!-- test dependencies -->
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-core</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
	</dependencies>
 
diff --git a/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/dataformat/BinaryArray.java b/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/dataformat/BinaryArray.java
index e708710..57ce04b 100644
--- a/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/dataformat/BinaryArray.java
+++ b/flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/dataformat/BinaryArray.java
@@ -347,6 +347,16 @@ public class BinaryArray extends BinaryFormat implements TypeGetterSetters {
 

[flink] branch sql-parse-exception deleted (was 5ba73ca)

2019-03-04 Thread jark
This is an automated email from the ASF dual-hosted git repository.

jark pushed a change to branch sql-parse-exception
in repository https://gitbox.apache.org/repos/asf/flink.git.


 was 5ba73ca  [FLINK-11801] [table-common] Port SqlParserException to 
flink-table-common

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.




[flink] branch release-1.8 updated: [FLINK-11791] Describe how to build Flink with Hadoop in build guide

2019-03-04 Thread aljoscha
This is an automated email from the ASF dual-hosted git repository.

aljoscha pushed a commit to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.8 by this push:
 new dfac0e2  [FLINK-11791] Describe how to build Flink with Hadoop in 
build guide
dfac0e2 is described below

commit dfac0e236f0479001c3f65e5d9cbfbe329316444
Author: Aljoscha Krettek 
AuthorDate: Mon Mar 4 10:34:49 2019 +0100

[FLINK-11791] Describe how to build Flink with Hadoop in build guide
---
 docs/flinkDev/building.md | 8 
 1 file changed, 8 insertions(+)

diff --git a/docs/flinkDev/building.md b/docs/flinkDev/building.md
index db483fe..56cf928 100644
--- a/docs/flinkDev/building.md
+++ b/docs/flinkDev/building.md
@@ -93,6 +93,14 @@ You can also specify a specific Hadoop version to build 
against:
 mvn clean install -DskipTests -Dhadoop.version=2.6.1
 {% endhighlight %}
 
+### Packaging Hadoop into the Flink distribution
+
+If you want to build a Flink distribution that has a shaded Hadoop pre-packaged in the lib folder, you can use the `include-hadoop` profile. Build Flink as described above, but include the profile:
+
+{% highlight bash %}
+mvn clean install -DskipTests -Pinclude-hadoop
+{% endhighlight %}
+
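
A quick way to verify such a build, assuming defaults; the build-target symlink is created by the Flink build, but the exact jar name pattern below is an assumption, not taken from this commit:

    # after the build, build-target/ points at the assembled distribution
    ls build-target/lib | grep -i hadoop
    # expect something like: flink-shaded-hadoop2-uber-<hadoop.version>-<flink.version>.jar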
 ### Vendor-specific Versions
 To check the list of supported vendor versions, look in 
https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs?repo=cloudera
 To build Flink against a vendor specific Hadoop version, issue the following 
command:



[flink] branch master updated: Merge pull request #7879 from aljoscha/build-with-hadoop-guide

2019-03-04 Thread aljoscha
This is an automated email from the ASF dual-hosted git repository.

aljoscha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 6c6ab037 Merge pull request #7879 from aljoscha/build-with-hadoop-guide
6c6ab037 is described below

commit 6c6ab0376aabf023f31c674c2ac43c0c32edf700
Author: Aljoscha Krettek 
AuthorDate: Mon Mar 4 10:34:49 2019 +0100

Merge pull request #7879 from aljoscha/build-with-hadoop-guide

[FLINK-11791] Describe how to build Flink with Hadoop in build guide
---
 docs/flinkDev/building.md | 8 
 1 file changed, 8 insertions(+)

diff --git a/docs/flinkDev/building.md b/docs/flinkDev/building.md
index db483fe..56cf928 100644
--- a/docs/flinkDev/building.md
+++ b/docs/flinkDev/building.md
@@ -93,6 +93,14 @@ You can also specify a specific Hadoop version to build 
against:
 mvn clean install -DskipTests -Dhadoop.version=2.6.1
 {% endhighlight %}
 
+### Packaging Hadoop into the Flink distribution
+
+If you want to build a Flink distribution that has a shaded Hadoop pre-packaged in the lib folder, you can use the `include-hadoop` profile. Build Flink as described above, but include the profile:
+
+{% highlight bash %}
+mvn clean install -DskipTests -Pinclude-hadoop
+{% endhighlight %}
+
 ### Vendor-specific Versions
 To check the list of supported vendor versions, look in 
https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs?repo=cloudera
 To build Flink against a vendor specific Hadoop version, issue the following 
command:



[flink] branch master updated: [FLINK-11516][table-common] Port and move catalog transitive classes to flink-table-common

2019-03-04 Thread twalthr
This is an automated email from the ASF dual-hosted git repository.

twalthr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 012e611  [FLINK-11516][table-common] Port and move catalog transitive 
classes to flink-table-common
012e611 is described below

commit 012e6118ad3de2bfaadd46c76f25bf9d4ad9a9d1
Author: Dian Fu 
AuthorDate: Fri Feb 1 23:23:06 2019 +0800

[FLINK-11516][table-common] Port and move catalog transitive classes to 
flink-table-common

This closes #7642.
---
 .../ElasticsearchUpsertTableSinkFactoryBase.java   |   4 +-
 .../kafka/KafkaTableSourceSinkFactoryBase.java |   2 +-
 .../gateway/utils/TestTableSinkFactoryBase.java|   2 +-
 .../gateway/utils/TestTableSourceFactoryBase.java  |   2 +-
 .../apache/flink/table/descriptors/Metadata.java   |  78 
 .../flink/table/descriptors/MetadataValidator.java |  41 
 .../apache/flink/table/descriptors/Statistics.java | 168 +
 .../table/descriptors/StatisticsValidator.java | 126 +
 .../StreamTableDescriptorValidator.java|  65 +++
 .../table/descriptors/StreamableDescriptor.java|  73 
 .../apache/flink/table/plan/stats/ColumnStats.java | 124 
 .../apache/flink/table/plan/stats/TableStats.java} |  46 -
 .../descriptors/DescriptorPropertiesTest.java  | 201 
 .../flink/table/descriptors/MetadataTest.java  |  69 +++
 .../flink/table/descriptors/StatisticsTest.java| 102 ++
 .../flink/table/catalog/ExternalCatalogTable.scala |   4 +-
 .../flink/table/catalog/ExternalTableUtil.scala|   1 -
 .../apache/flink/table/descriptors/Metadata.scala  |  88 -
 .../table/descriptors/MetadataValidator.scala  |  43 -
 .../flink/table/descriptors/Statistics.scala   | 166 -
 .../table/descriptors/StatisticsValidator.scala| 120 
 .../StreamTableDescriptorValidator.scala   |  59 --
 .../table/descriptors/StreamableDescriptor.scala   |  67 ---
 .../flink/table/plan/schema/DataSetTable.scala |   2 +-
 .../flink/table/plan/stats/ColumnStats.scala   |  54 --
 .../flink/table/plan/stats/FlinkStatistic.scala|   4 +-
 .../descriptors/DescriptorPropertiesTest.scala | 207 -
 .../flink/table/descriptors/MetadataTest.scala |  59 --
 .../flink/table/descriptors/StatisticsTest.scala   |  89 -
 29 files changed, 1093 insertions(+), 973 deletions(-)

diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchUpsertTableSinkFactoryBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchUpsertTableSinkFactoryBase.java
index 63e9b34..f52de79 100644
--- a/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchUpsertTableSinkFactoryBase.java
+++ b/flink-connectors/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchUpsertTableSinkFactoryBase.java
@@ -113,7 +113,7 @@ public abstract class ElasticsearchUpsertTableSinkFactoryBase implements StreamT
		final List<String> properties = new ArrayList<>();
 
		// streaming properties
-		properties.add(UPDATE_MODE());
+		properties.add(UPDATE_MODE);
 
		// Elasticsearch
		properties.add(CONNECTOR_HOSTS + ".#." + CONNECTOR_HOSTS_HOSTNAME);
@@ -150,7 +150,7 @@ public abstract class ElasticsearchUpsertTableSinkFactoryBase implements StreamT
		final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
 
		return createElasticsearchUpsertTableSink(
-			descriptorProperties.isValue(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()),
+			descriptorProperties.isValue(UPDATE_MODE, UPDATE_MODE_VALUE_APPEND),
			descriptorProperties.getTableSchema(SCHEMA()),
			getHosts(descriptorProperties),
			descriptorProperties.getString(CONNECTOR_INDEX),
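
The dropped parentheses are the visible effect of the port: the Scala validator exposed its constants as parameterless defs (hence UPDATE_MODE()), while the ported Java class in flink-table-common exposes plain static fields. A hedged sketch of the shape; the class body here is illustrative, though the property values match the update-mode key this diff manipulates:

    // Java port in flink-table-common: plain constants, referenced without ()
    public class StreamTableDescriptorValidator {
        public static final String UPDATE_MODE = "update-mode";
        public static final String UPDATE_MODE_VALUE_APPEND = "append";
        // ... validation logic omitted
    }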
diff --git a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java b/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
index 542dbd9..146eba5 100644
--- a/flink-connectors/flink-connector-kafka-base/src/main/java/org/apache/flink/streaming/connectors/kafka/KafkaTableSourceSinkFactoryBase.java
+++