Jenkins build is back to normal : flink-snapshot-deployment-1.7 #160

2019-04-17 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : flink-snapshot-deployment-1.5 #252

2019-04-17 Thread Apache Jenkins Server
See 




[flink] branch master updated: [FLINK-11474][table] Add ReadableCatalog, ReadableWritableCatalog, and other related interfaces.

2019-04-17 Thread jincheng
This is an automated email from the ASF dual-hosted git repository.

jincheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 92ae67d  [FLINK-11474][table] Add ReadableCatalog, 
ReadableWritableCatalog, and other related interfaces.
92ae67d is described below

commit 92ae67d788050f2e2d457692bc0c638bc142a265
Author: Xuefu Zhang 
AuthorDate: Mon Mar 18 20:13:29 2019 -0700

[FLINK-11474][table] Add ReadableCatalog, ReadableWritableCatalog, and 
other related interfaces.

This closes #8007.
---
 .../table/catalog/GenericCatalogDatabase.java  |  71 +++
 .../flink/table/catalog/GenericCatalogTable.java   |  94 +++
 .../flink/table/catalog/GenericCatalogView.java| 102 
 .../table/catalog/GenericInMemoryCatalog.java  | 272 +
 .../flink/table/catalog/CatalogTestUtil.java   |  82 +++
 .../table/catalog/GenericInMemoryCatalogTest.java  | 649 +
 .../flink/table/catalog/CatalogBaseTable.java  |  62 ++
 .../CatalogDatabase.java}  |  49 +-
 .../TableStats.java => catalog/CatalogTable.java}  |  40 +-
 .../apache/flink/table/catalog/CatalogView.java|  47 ++
 .../org/apache/flink/table/catalog/ObjectPath.java |  92 +++
 .../flink/table/catalog/ReadableCatalog.java   | 135 +
 .../table/catalog/ReadableWritableCatalog.java | 134 +
 .../exceptions/CatalogException.java}  |  44 +-
 .../exceptions/DatabaseAlreadyExistException.java} |  40 +-
 .../exceptions/DatabaseNotEmptyException.java} |  41 +-
 .../exceptions/DatabaseNotExistException.java} |  41 +-
 .../exceptions/TableAlreadyExistException.java}|  38 +-
 .../exceptions/TableNotExistException.java}|  38 +-
 .../apache/flink/table/plan/stats/ColumnStats.java |  10 +
 .../apache/flink/table/plan/stats/TableStats.java  |  13 +
 21 files changed, 1852 insertions(+), 242 deletions(-)

diff --git 
a/flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericCatalogDatabase.java
 
b/flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericCatalogDatabase.java
new file mode 100644
index 000..5f2c732
--- /dev/null
+++ 
b/flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericCatalogDatabase.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.catalog;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A generic catalog database implementation.
+ */
+public class GenericCatalogDatabase implements CatalogDatabase {
+	private final Map<String, String> properties;
+	// Comment of the database
+	private String comment = "This is a generic catalog database.";
+
+	public GenericCatalogDatabase(Map<String, String> properties) {
+		this.properties = checkNotNull(properties, "properties cannot be null");
+	}
+
+	public GenericCatalogDatabase(Map<String, String> properties, String comment) {
+		this(properties);
+		this.comment = comment;
+	}
+
+	public Map<String, String> getProperties() {
+		return properties;
+	}
+
+	@Override
+	public GenericCatalogDatabase copy() {
+		return new GenericCatalogDatabase(new HashMap<>(properties), comment);
+	}
+
+	@Override
+	public Optional<String> getDescription() {
+		return Optional.of(comment);
+	}
+
+	@Override
+	public Optional<String> getDetailedDescription() {
+		return Optional.of("This is a generic catalog database stored in memory only");
+	}
+
+	public String getComment() {
+		return this.comment;
+	}
+
+	public void setComment(String comment) {
+		this.comment = comment;
+	}
+
+}
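
A minimal usage sketch of the GenericCatalogDatabase added above (the property key and comment are invented for illustration, and the class is assumed to be on the classpath):

    import java.util.HashMap;
    import java.util.Map;

    Map<String, String> props = new HashMap<>();
    props.put("default-encoding", "UTF-8"); // hypothetical property
    GenericCatalogDatabase db = new GenericCatalogDatabase(props, "my database");

    // copy() duplicates the properties map, so mutating the copy's
    // properties leaves the original instance untouched.
    GenericCatalogDatabase copy = db.copy();
    copy.getProperties().put("owner", "test"); // does not affect 'db'

    db.getDescription(); // Optional.of("my database")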
diff --git 
a/flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericCatalogTable.java
 
b/flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/GenericCatalogTable.java
new file mode 100644

[flink-web] branch asf-site updated (78a7b3b -> a98d311)

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a change to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git.


from 78a7b3b  Rebuild website
 new 87762b6  [FLINK-12191] Fix SVG Flink logos.
 new a98d311  Rebuild website.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 content/img/logo.zip | Bin 7708436 -> 7744668 bytes
 content/img/logo/svg/color_black.svg | 413 ++-
 content/img/logo/svg/color_white.svg | 413 ++-
 content/material.html|   8 +-
 content/zh/material.html |   8 +-
 img/logo.zip | Bin 7708436 -> 7744668 bytes
 img/logo/svg/color_black.svg | 413 ++-
 img/logo/svg/color_white.svg | 413 ++-
 material.md  |   8 +-
 material.zh.md   |   8 +-
 10 files changed, 856 insertions(+), 828 deletions(-)
 mode change 100755 => 100644 content/img/logo.zip
 mode change 100755 => 100644 img/logo.zip



[flink-web] 02/02: Rebuild website.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git

commit a98d3115460d2b053b8308433b6518660b3fc5fb
Author: Fabian Hueske 
AuthorDate: Wed Apr 17 17:56:02 2019 +0200

Rebuild website.
---
 content/img/logo.zip | Bin 7708436 -> 7744668 bytes
 content/img/logo/svg/color_black.svg | 413 ++-
 content/img/logo/svg/color_white.svg | 413 ++-
 content/material.html|   8 +-
 content/zh/material.html |   8 +-
 5 files changed, 428 insertions(+), 414 deletions(-)

diff --git a/content/img/logo.zip b/content/img/logo.zip
old mode 100755
new mode 100644
index 6c816f5..086d9ca
Binary files a/content/img/logo.zip and b/content/img/logo.zip differ
diff --git a/content/img/logo/svg/color_black.svg 
b/content/img/logo/svg/color_black.svg
index d54707c..7799f55 100755
--- a/content/img/logo/svg/color_black.svg
+++ b/content/img/logo/svg/color_black.svg
@@ -3,6 +3,211 @@
 http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd";>
 http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; x="0px" y="0px"
 width="2000px" height="1280px" viewBox="0 0 2000 1280" 
enable-background="new 0 0 2000 1280" xml:space="preserve">
[The rest of this diff (relocated SVG gradient definitions and removed inline gradient elements) was stripped by the mail archiver.]

[flink-web] 01/02: [FLINK-12191] Fix SVG Flink logos.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git

commit 87762b6388d249ba8123d75c4f1af65b4ad34fd3
Author: Patrick Lucas 
AuthorDate: Fri Apr 12 16:46:50 2019 +0200

[FLINK-12191] Fix SVG Flink logos.

* Gradients don't appear correctly in Firefox since gradient definitions
  not in <defs> tag
* One squirrel missing eye gradient

This closes #199.
---
 img/logo.zip | Bin 7708436 -> 7744668 bytes
 img/logo/svg/color_black.svg | 413 ++-
 img/logo/svg/color_white.svg | 413 ++-
 material.md  |   8 +-
 material.zh.md   |   8 +-
 5 files changed, 428 insertions(+), 414 deletions(-)

diff --git a/img/logo.zip b/img/logo.zip
old mode 100755
new mode 100644
index 6c816f5..086d9ca
Binary files a/img/logo.zip and b/img/logo.zip differ
diff --git a/img/logo/svg/color_black.svg b/img/logo/svg/color_black.svg
index d54707c..7799f55 100755
--- a/img/logo/svg/color_black.svg
+++ b/img/logo/svg/color_black.svg
@@ -3,6 +3,211 @@
 http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd";>
 http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; x="0px" y="0px"
 width="2000px" height="1280px" viewBox="0 0 2000 1280" 
enable-background="new 0 0 2000 1280" xml:space="preserve">
[The rest of this diff (relocated SVG gradient definitions and removed inline gradient elements) was stripped by the mail archiver.]

[flink] branch master updated: [FLINK-12212][docs] Clarify that operator state is checkpointed asynchronously

2019-04-17 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 046d752  [FLINK-12212][docs] Clarify that operator state is 
checkpointed asynchronously
046d752 is described below

commit 046d752c6e41872fbb83163b8c5862c7b8855f5d
Author: Konstantin Knauf 
AuthorDate: Wed Apr 17 16:40:04 2019 +0200

[FLINK-12212][docs] Clarify that operator state is checkpointed 
asynchronously

This closes #8185.
---
 docs/ops/state/large_state_tuning.md | 20 
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/docs/ops/state/large_state_tuning.md b/docs/ops/state/large_state_tuning.md
index effea99..9dae3b6 100644
--- a/docs/ops/state/large_state_tuning.md
+++ b/docs/ops/state/large_state_tuning.md
@@ -100,22 +100,18 @@ number of network buffers used per outgoing/incoming channel is limited and thus
 may be configured without affecting checkpoint times
 (see [network buffer configuration](../config.html#configuring-the-network-buffers)).
 
-## Make state checkpointing Asynchronous where possible
+## Asynchronous Checkpointing
 
 When state is *asynchronously* snapshotted, the checkpoints scale better than when the state is *synchronously* snapshotted.
-Especially in more complex streaming applications with multiple joins, Co-functions, or windows, this may have a profound
+Especially in more complex streaming applications with multiple joins, co-functions, or windows, this may have a profound
 impact.
 
-To get state to be snapshotted asynchronously, applications have to do two things:
+For state to be snapshotted asynchronously, you need to use a state backend which supports asynchronous snapshotting.
+Starting from Flink 1.3, both RocksDB-based as well as heap-based state backends (`filesystem`) support asynchronous
+snapshotting and use it by default. This applies to both managed operator state as well as managed keyed state (incl. timers state).
 
-  1. Use state that is [managed by Flink](../../dev/stream/state/state.html): Managed state means that Flink provides the data
-     structure in which the state is stored. Currently, this is true for *keyed state*, which is abstracted behind the
-     interfaces like `ValueState`, `ListState`, `ReducingState`, ...
-
-  2. Use a state backend that supports asynchronous snapshots. In Flink 1.2, only the RocksDB state backend uses
-     fully asynchronous snapshots. Starting from Flink 1.3, heap-based state backends also support asynchronous snapshots.
-
-The above two points imply that large state should generally be kept as keyed state, not as operator state.
+Note *The combination RocksDB state backend with heap-based timers currently does NOT support asynchronous snapshots for the timers state.
+Other state like keyed state is still snapshotted asynchronously. Please note that this is not a regression from previous versions and will be resolved with `FLINK-10026`.*
 
 ## Tuning RocksDB
 
@@ -150,7 +146,7 @@ timers, while storing timers inside RocksDB offers higher scalability as the num
 When using RockDB as state backend, the type of timer storage can be selected through Flink's configuration via option key `state.backend.rocksdb.timer-service.factory`.
 Possible choices are `heap` (to store timers on the heap, default) and `rocksdb` (to store timers in RocksDB).
 
-Note *The combination RocksDB state backend / with incremental checkpoint / with heap-based timers currently does NOT support asynchronous snapshots for the timers state.
+Note *The combination RocksDB state backend with heap-based timers currently does NOT support asynchronous snapshots for the timers state.
 Other state like keyed state is still snapshotted asynchronously. Please note that this is not a regression from previous versions and will be resolved with `FLINK-10026`.*
 
 **Predefined Options**
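
To act on the advice in the docs change above, a minimal sketch against the Flink 1.8-era DataStream API (the checkpoint URI is a placeholder):

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // FsStateBackend is the heap-based ("filesystem") backend; the second
    // argument requests asynchronous snapshots (the default from Flink 1.3 on).
    env.setStateBackend(new FsStateBackend("hdfs:///flink/checkpoints", true));
    env.enableCheckpointing(10_000); // checkpoint every 10 seconds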



[flink] branch cron-master-dependency_check created (now d076fed)

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a change to branch cron-master-dependency_check
in repository https://gitbox.apache.org/repos/asf/flink.git.


  at d076fed  [FLINK-12119][build] Setup dependency-check cron job

This branch includes the following new commits:

 new d076fed  [FLINK-12119][build] Setup dependency-check cron job

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[flink] 01/01: [FLINK-12119][build] Setup dependency-check cron job

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch cron-master-dependency_check
in repository https://gitbox.apache.org/repos/asf/flink.git

commit d076feda170a2fb31d4d727227ad8d016485b82a
Author: Chesnay Schepler 
AuthorDate: Wed Apr 17 16:29:34 2019 +0200

[FLINK-12119][build] Setup dependency-check cron job
---
 .travis.yml |  53 
 LICENSE | 201 
 check.sh|  37 
 tools/travis/setup_maven.sh |  38 +
 4 files changed, 329 insertions(+)

diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000..b673daf
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# s3 deployment based on 
http://about.travis-ci.org/blog/2012-12-18-travis-artifacts/
+
+# send to fully-virtualized infrastructure: 
https://docs.travis-ci.com/user/trusty-ci-environment/
+sudo: required
+dist: trusty
+
+cache:
+  directories:
+  - $HOME/.m2
+  # keep in sync with tools/travis/setup_maven.sh
+  - $HOME/maven_cache
+
+install: true
+
+language: java
+jdk: "openjdk8"
+script: "./check.sh"
+
+matrix:
+  include:
+    - env:
+      - REMOTE="apache"
+      - BRANCH="master"
+
+git:
+  depth: 100
+
+env:
+  global:
+    # Global variable to avoid hanging travis builds when downloading cache archives.
+    - MALLOC_ARENA_MAX=2
+
+notifications:
+  slack:
+    rooms:
+      - secure: ikPQn5JTpkyzxVyOPm/jIl3FPm6hY8xAdG4pSwxGWjBqF+NmmNTp9YZsJ6fD8xPql6T5n1hNDbZSC14jVUw/vvXGvibDXLN+06f25ZQl+4LJBXaiR7gTG6y3nO8G90Vw7XpvCme6n5Md9tvjygb17a4FEgRJFfwzWnnyPA1yvK0=
+    on_success: never
+    on_pull_requests: false
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.

[flink] branch master updated: [FLINK-12119][build] Add owasp-dependency-check plugin

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new a505838  [FLINK-12119][build] Add owasp-dependency-check plugin
a505838 is described below

commit a5058388d463da5ab54127fa58ab1c62115b137e
Author: Konstantin Knauf 
AuthorDate: Sat Apr 6 00:05:40 2019 +0200

[FLINK-12119][build] Add owasp-dependency-check plugin

Run via "mvn org.owasp:dependency-check-maven:aggregate".
Prints a report to stdout and creates a report in the root /target 
directory.
---
 pom.xml | 17 +
 1 file changed, 17 insertions(+)

diff --git a/pom.xml b/pom.xml
index a8ab781..72a2cf8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1813,6 +1813,23 @@ under the License.


 
+			<plugin>
+				<groupId>org.owasp</groupId>
+				<artifactId>dependency-check-maven</artifactId>
+				<version>5.0.0-M2</version>
+				<configuration>
+					<format>ALL</format>
+					<skipProvidedScope>true</skipProvidedScope>
+					<skipSystemScope>true</skipSystemScope>
+					<excludes>
+						<exclude>*flink-docs</exclude>
+						<exclude>*flink-end-to-end-tests</exclude>
+						<exclude>*flink-fs-tests*</exclude>
+						<exclude>*flink-yarn-tests*</exclude>
+					</excludes>
+				</configuration>
+			</plugin>
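
(The XML element names above are reconstructed from the tag-stripped archive; the groupId, artifactId, version, format, and exclude values are as they appeared. As the commit message notes, the check is run via:

    mvn org.owasp:dependency-check-maven:aggregate

which prints a report to stdout and writes a report to the root /target directory.)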






[flink] branch master updated (8a91b07 -> c28c6e8)

2019-04-17 Thread kkloudas
This is an automated email from the ASF dual-hosted git repository.

kkloudas pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 8a91b07  [hotfix][rockdb] Fix a swallowed exception cause in 
RocksDBListState.
 new f22dd5b  [FLINK-11667][checkpointing] Add Synchronous Checkpoint 
handling in StreamTask.
 new 093ddc1  [FLINK-11668][checkpointing] Allow sources to advance to 
MAX_WATERMARK.
 new 1b5a251  [FLINK-11669][checkpointing] Wire the Suspend/Terminate to 
the JM/TM.
 new c28c6e8  [FLINK-11670][FLINK-11671][rest][cli] Expose Suspend/Drain 
via REST and CLI.

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/flink/client/cli/CliFrontend.java   |  41 +--
 .../apache/flink/client/cli/CliFrontendParser.java |  23 +-
 .../org/apache/flink/client/cli/StopOptions.java   |  33 +-
 .../apache/flink/client/program/ClusterClient.java |   8 +-
 .../flink/client/program/MiniClusterClient.java|   4 +-
 .../client/program/rest/RestClusterClient.java |  39 ++-
 java => CliFrontendStopWithSavepointTest.java} |  93 -
 .../client/program/rest/RestClusterClientTest.java |   4 -
 .../runtime/checkpoint/CheckpointCoordinator.java  |  62 +++-
 .../runtime/checkpoint/CheckpointProperties.java   |  24 +-
 .../flink/runtime/checkpoint/CheckpointType.java   |  30 +-
 .../flink/runtime/dispatcher/Dispatcher.java   |  13 +
 .../flink/runtime/executiongraph/Execution.java|  29 +-
 .../network/api/serialization/EventSerializer.java |   6 +
 .../runtime/jobgraph/tasks/AbstractInvokable.java  |   6 +-
 .../jobmanager/slots/ActorTaskManagerGateway.java  |   5 +-
 .../jobmanager/slots/TaskManagerGateway.java   |   5 +-
 .../apache/flink/runtime/jobmaster/JobMaster.java  |  58 
 .../flink/runtime/jobmaster/JobMasterGateway.java  |  15 +
 .../runtime/jobmaster/RpcTaskManagerGateway.java   |   5 +-
 .../flink/runtime/minicluster/MiniCluster.java |   4 +
 .../handler/job/savepoints/SavepointHandlers.java  |  63 +++-
 .../StopWithSavepointRequestBody.java} |  24 +-
 .../StopWithSavepointTriggerHeaders.java}  |  26 +-
 .../flink/runtime/taskexecutor/TaskExecutor.java   |  11 +-
 .../runtime/taskexecutor/TaskExecutorGateway.java  |   9 +-
 .../BlockingCallMonitoringThreadPool.java  | 104 ++
 .../org/apache/flink/runtime/taskmanager/Task.java |  52 ++-
 .../flink/runtime/webmonitor/RestfulGateway.java   |  19 +
 .../runtime/webmonitor/WebMonitorEndpoint.java |   8 +
 .../checkpoint/CheckpointCoordinatorTest.java  |   4 +-
 .../runtime/checkpoint/CheckpointTypeTest.java |   1 +
 .../runtime/checkpoint/PendingCheckpointTest.java  |  15 +
 ...ncurrentFailoverStrategyExecutionGraphTest.java |   6 +-
 .../utils/SimpleAckingTaskManagerGateway.java  |   3 +-
 .../serialization/CheckpointSerializationTest.java |  85 +
 .../api/serialization/EventSerializerTest.java |  31 --
 .../jobmaster/utils/TestingJobMasterGateway.java   |  10 +
 .../utils/TestingJobMasterGatewayBuilder.java  |   8 +-
 ...est.java => StopWithSavepointHandlersTest.java} | 124 +++
 .../taskexecutor/TestingTaskExecutorGateway.java   |   2 +-
 .../runtime/taskmanager/TaskAsyncCallTest.java |  30 +-
 .../webmonitor/TestingDispatcherGateway.java   |   5 +-
 .../runtime/webmonitor/TestingRestfulGateway.java  |  25 +-
 .../streaming/state/RocksDBKeyedStateBackend.java  |   4 +-
 .../state/snapshot/RocksFullSnapshotStrategy.java  |   4 +-
 .../streaming/state/RocksDBAsyncSnapshotTest.java  |   5 +-
 .../streaming/api/operators/StreamSource.java  |  11 +-
 .../streaming/runtime/tasks/SourceStreamTask.java  |  16 +-
 .../flink/streaming/runtime/tasks/StreamTask.java  |  93 -
 .../runtime/tasks/SynchronousSavepointLatch.java   | 120 +++
 .../AbstractUdfStreamOperatorLifecycleTest.java|   3 +-
 .../api/operators/async/AsyncWaitOperatorTest.java |   4 +-
 .../runtime/io/BarrierBufferTestBase.java  |   3 +-
 .../streaming/runtime/io/BarrierTrackerTest.java   |   2 +-
 .../runtime/tasks/OneInputStreamTaskTest.java  |   2 +-
 .../runtime/tasks/RestoreStreamTaskTest.java   |   2 +-
 .../tasks/SourceExternalCheckpointTriggerTest.java |   4 +-
 .../runtime/tasks/SourceStreamTaskTest.java|   2 +-
 .../runtime/tasks/SourceTaskTerminationTest.java   | 243 +
 .../tasks/StreamTaskCancellationBarrierTest.java   |   2 +-
 .../runtime/tasks/StreamTaskTerminationTest.java   |   2 +-
 .../streaming/runtime/tasks/StreamTaskTest.java|  10 +-
 .../runtime/tasks/StreamTaskTestHarness.java   |   7 +-
 .../runtime/tasks/SynchronousCheckpointITCase.java | 333 ++
 .../runtime/tasks/SynchronousCheckpointTest.java   | 188 ++
 .../tasks/Synchronou

[flink] 02/04: [FLINK-12204][jdbc] Improve JDBCOutputFormat ClassCastException.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 3b1c14f9cbc3f4db0c4c14a8ba55edbccef76729
Author: hequn8128 
AuthorDate: Tue Apr 16 15:07:43 2019 +0800

[FLINK-12204][jdbc] Improve JDBCOutputFormat ClassCastException.

This closes #8182.
---
 .../flink/api/java/io/jdbc/JDBCOutputFormat.java  |  8 ++--
 .../apache/flink/api/java/io/jdbc/JDBCFullTest.java   | 19 +++
 .../apache/flink/api/java/io/jdbc/JDBCTestBase.java   |  5 +
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git 
a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
 
b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
index 85c..f773635 100644
--- 
a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
+++ 
b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCOutputFormat.java
@@ -198,8 +198,12 @@ public class JDBCOutputFormat extends RichOutputFormat<Row> {
 					// case java.sql.Types.STRUCT
 				}
 			} catch (ClassCastException e) {
-				throw new RuntimeException(
-					"Field index: " + index + ", field value: " + row.getField(index) + " " + e.getMessage(), e);
+				// enrich the exception with detailed information.
+				String errorMessage = String.format(
+					"%s, field index: %s, field value: %s.", e.getMessage(), index, row.getField(index));
+				ClassCastException enrichedException = new ClassCastException(errorMessage);
+				enrichedException.setStackTrace(e.getStackTrace());
+				throw enrichedException;
 			}
 		}
 	}
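
The pattern in this hunk (replace an uninformative exception with one that carries context, while preserving the original stack trace) generalizes beyond JDBC; a standalone sketch:

    Object value = "11.11";
    try {
        Double price = (Double) value; // fails at runtime: value is a String
    } catch (ClassCastException e) {
        ClassCastException enriched = new ClassCastException(
            String.format("%s, field index: %s, field value: %s.", e.getMessage(), 3, value));
        enriched.setStackTrace(e.getStackTrace()); // keep the original frames
        throw enriched;
    }

ClassCastException has no (message, cause) constructor, which is why the stack trace is copied over instead of chaining the cause.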
diff --git 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
index c1f2b25..51d39b2 100644
--- 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
+++ 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCFullTest.java
@@ -50,6 +50,25 @@ public class JDBCFullTest extends JDBCTestBase {
 		runTest(true);
 	}
 
+	@Test
+	public void testEnrichedClassCastException() throws Exception {
+		exception.expect(ClassCastException.class);
+		exception.expectMessage(
+			"java.lang.String cannot be cast to java.lang.Double, field index: 3, field value: 11.11.");
+
+		JDBCOutputFormat jdbcOutputFormat = JDBCOutputFormat.buildJDBCOutputFormat()
+			.setDrivername(JDBCTestBase.DRIVER_CLASS)
+			.setDBUrl(JDBCTestBase.DB_URL)
+			.setQuery("insert into newbooks (id, title, author, price, qty) values (?,?,?,?,?)")
+			.setSqlTypes(new int[]{Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.DOUBLE, Types.INTEGER})
+			.finish();
+
+		jdbcOutputFormat.open(1, 1);
+		Row inputRow = Row.of(1001, "Java public for dummies", "Tan Ah Teck", "11.11", 11);
+		jdbcOutputFormat.writeRecord(inputRow);
+		jdbcOutputFormat.close();
+	}
+
 	private void runTest(boolean exploitParallelism) throws Exception {
 		ExecutionEnvironment environment = ExecutionEnvironment.getExecutionEnvironment();
 		JDBCInputFormatBuilder inputBuilder = JDBCInputFormat.buildJDBCInputFormat()
diff --git 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
index 1d41d37..febbbd3 100644
--- 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
+++ 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCTestBase.java
@@ -23,6 +23,8 @@ import org.apache.flink.api.java.typeutils.RowTypeInfo;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
 
 import java.io.O

[flink] branch master updated (e42f6b5 -> 8a91b07)

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


from e42f6b5  [FLINK-12170] [table-planner-blink] Add support for 
generating optimized logical plan for Over aggregate (#8157)
 new 494f5be  [FLINK-12198][jdbc] Add support for setting auto-commit mode 
of JDBCInputFormat DB connection.
 new 3b1c14f  [FLINK-12204][jdbc] Improve JDBCOutputFormat 
ClassCastException.
 new 3422308  [hotfix][yarn] Fix typo in comment of YarnConfigurationITCase.
 new 8a91b07  [hotfix][rockdb] Fix a swallowed exception cause in 
RocksDBListState.

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../flink/api/java/io/jdbc/JDBCInputFormat.java| 19 
 .../flink/api/java/io/jdbc/JDBCOutputFormat.java   |  8 +++--
 .../flink/api/java/io/jdbc/JDBCFullTest.java   | 19 
 .../api/java/io/jdbc/JDBCInputFormatTest.java  | 35 ++
 .../flink/api/java/io/jdbc/JDBCTestBase.java   |  5 
 .../contrib/streaming/state/RocksDBListState.java  |  2 +-
 .../apache/flink/yarn/YarnConfigurationITCase.java |  2 +-
 7 files changed, 86 insertions(+), 4 deletions(-)



[flink] 04/04: [hotfix][rockdb] Fix a swallowed exception cause in RocksDBListState.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 8a91b07d5ab85b6bf100a36c5e26e6297a7f3a2a
Author: blueszheng 
AuthorDate: Thu Mar 14 16:21:56 2019 +0800

[hotfix][rockdb] Fix a swallowed exception cause in RocksDBListState.

This closes #7983.
---
 .../java/org/apache/flink/contrib/streaming/state/RocksDBListState.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
 
b/flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
index 00123f5..c18adb1 100644
--- 
a/flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
+++ 
b/flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBListState.java
@@ -148,7 +148,7 @@ class RocksDBListState<K, N, V>
 				return element;
 			}
 		} catch (IOException e) {
-			throw new FlinkRuntimeException("Unexpected list element deserialization failure");
+			throw new FlinkRuntimeException("Unexpected list element deserialization failure", e);
 		}
 		return null;
 	}
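
The one-line fix above restores exception chaining; in general, wrapping an exception without passing it as the cause discards the root error. A sketch (riskyDeserialize is a hypothetical method throwing IOException):

    try {
        riskyDeserialize();
    } catch (IOException e) {
        // Passing 'e' as the second argument preserves the original stack
        // trace; omitting it (as before this fix) swallows the cause.
        throw new FlinkRuntimeException("Unexpected list element deserialization failure", e);
    }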



[flink] 03/04: [hotfix][yarn] Fix typo in comment of YarnConfigurationITCase.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 3422308f54d4d4ce64231ba75ff39764bc8e
Author: Paul Lam 
AuthorDate: Wed Mar 20 16:36:18 2019 +0800

[hotfix][yarn] Fix typo in comment of YarnConfigurationITCase.

This closes #8161.
---
 .../src/test/java/org/apache/flink/yarn/YarnConfigurationITCase.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnConfigurationITCase.java
 
b/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnConfigurationITCase.java
index a2772b7..6da6055 100644
--- 
a/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnConfigurationITCase.java
+++ 
b/flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnConfigurationITCase.java
@@ -181,7 +181,7 @@ public class YarnConfigurationITCase extends YarnTestBase {
 			final long expectedHeadSize = containeredTaskManagerParameters.taskManagerHeapSizeMB() << 20L;
 
 			// We compare here physical memory assigned to a container with the heap memory that we should pass to
-			// jvm as Xmx parameter. Those value might differ significantly due to sytem page size or jvm
+			// jvm as Xmx parameter. Those value might differ significantly due to system page size or jvm
 			// implementation therefore we use 15% threshold here.
 			assertThat(
 				(double) taskManagerInfo.getHardwareDescription().getSizeOfJvmHeap() / (double) expectedHeadSize,



[flink] 01/04: [FLINK-12198][jdbc] Add support for setting auto-commit mode of JDBCInputFormat DB connection.

2019-04-17 Thread fhueske
This is an automated email from the ASF dual-hosted git repository.

fhueske pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 494f5be09d165c5adca092ef1bdcfec1a238c316
Author: Konstantinos Papadopoulos 
AuthorDate: Tue Apr 16 13:55:56 2019 +0300

[FLINK-12198][jdbc] Add support for setting auto-commit mode of 
JDBCInputFormat DB connection.

This closes #8186.
---
 .../flink/api/java/io/jdbc/JDBCInputFormat.java| 19 
 .../api/java/io/jdbc/JDBCInputFormatTest.java  | 35 ++
 2 files changed, 54 insertions(+)

diff --git 
a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
 
b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
index 7d08814..cba125e 100644
--- 
a/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
+++ 
b/flink-connectors/flink-jdbc/src/main/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormat.java
@@ -116,6 +116,8 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 	private transient PreparedStatement statement;
 	private transient ResultSet resultSet;
 	private int fetchSize;
+	// Boolean to distinguish between default value and explicitly set autoCommit mode.
+	private Boolean autoCommit;
 
 	private boolean hasNext;
 	private Object[][] parameterValues;
@@ -143,6 +145,13 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 		} else {
 			dbConn = DriverManager.getConnection(dbURL, username, password);
 		}
+
+		// set autoCommit mode only if it was explicitly configured.
+		// keep connection default otherwise.
+		if (autoCommit != null) {
+			dbConn.setAutoCommit(autoCommit);
+		}
+
 		statement = dbConn.prepareStatement(queryTemplate, resultSetType, resultSetConcurrency);
 		if (fetchSize == Integer.MIN_VALUE || fetchSize > 0) {
 			statement.setFetchSize(fetchSize);
@@ -323,6 +332,11 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 		return statement;
 	}
 
+	@VisibleForTesting
+	Connection getDbConn() {
+		return dbConn;
+	}
+
 	/**
 	 * A builder used to set parameters to the output format's configuration in a fluent way.
 	 * @return builder
@@ -396,6 +410,11 @@ public class JDBCInputFormat extends RichInputFormat<Row, InputSplit> implements
 			return this;
 		}
 
+		public JDBCInputFormatBuilder setAutoCommit(Boolean autoCommit) {
+			format.autoCommit = autoCommit;
+			return this;
+		}
+
 		public JDBCInputFormat finish() {
 			if (format.username == null) {
 				LOG.info("Username was not supplied separately.");
diff --git 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
index 10e8c66..81320e6 100644
--- 
a/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
+++ 
b/flink-connectors/flink-jdbc/src/test/java/org/apache/flink/api/java/io/jdbc/JDBCInputFormatTest.java
@@ -155,6 +155,41 @@ public class JDBCInputFormatTest extends JDBCTestBase {
 	}
 
 	@Test
+	public void testDefaultAutoCommitIsUsedIfNotConfiguredOtherwise() throws SQLException, ClassNotFoundException {
+
+		jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
+			.setDrivername(DRIVER_CLASS)
+			.setDBUrl(DB_URL)
+			.setQuery(SELECT_ALL_BOOKS)
+			.setRowTypeInfo(ROW_TYPE_INFO)
+			.finish();
+		jdbcInputFormat.openInputFormat();
+
+		Class.forName(DRIVER_CLASS);
+		final boolean defaultAutoCommit = DriverManager.getConnection(DB_URL).getAutoCommit();
+
+		Assert.assertEquals(defaultAutoCommit, jdbcInputFormat.getDbConn().getAutoCommit());
+
+	}
+
+	@Test
+	public void testAutoCommitCanBeConfigured() throws SQLException {
+
+		final boolean desiredAutoCommit = false;
+		jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
+			.setDrivername(DRIVER_CLASS)
+			.setDBUrl(DB_URL)
+			.setQuery(SELECT_ALL_BOOKS)
+			.setRowTypeInfo(ROW_TYPE_INFO)
+			.setAutoCommit(desiredAutoCommit)
[flink] branch master updated: [FLINK-12170] [table-planner-blink] Add support for generating optimized logical plan for Over aggregate (#8157)

2019-04-17 Thread kurt
This is an automated email from the ASF dual-hosted git repository.

kurt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new e42f6b5  [FLINK-12170] [table-planner-blink] Add support for 
generating optimized logical plan for Over aggregate (#8157)
e42f6b5 is described below

commit e42f6b52cf43a1a3aaac37d3f8c66e647be63ece
Author: godfrey he 
AuthorDate: Wed Apr 17 19:49:54 2019 +0800

[FLINK-12170] [table-planner-blink] Add support for generating optimized 
logical plan for Over aggregate (#8157)
---
 .../flink/table/api/PlannerConfigOptions.java  |   4 +-
 .../flink/table/expressions/ExpressionBuilder.java |   6 +
 .../aggfunctions/DenseRankAggFunction.java |  80 +++
 .../{ => aggfunctions}/LeadLagAggFunction.java |   3 +-
 .../functions/aggfunctions/RankAggFunction.java|  92 
 .../aggfunctions/RankLikeAggFunctionBase.java  | 129 +
 .../aggfunctions/RowNumberAggFunction.java |  86 
 .../physical/batch/BatchExecOverAggregate.scala|   6 +-
 .../table/plan/rules/FlinkBatchRuleSets.scala  |   1 +
 .../table/plan/rules/FlinkStreamRuleSets.scala |   1 +
 .../batch/BatchExecOverWindowAggRule.scala | 181 +++
 .../batch/BatchExecSortMergeJoinRule.scala |   2 +-
 .../stream/StreamExecOverAggregateRule.scala   |  79 +++
 .../flink/table/plan/util/AggFunctionFactory.scala |  33 +-
 .../flink/table/plan/util/OverAggregateUtil.scala  | 107 
 .../utils/JavaUserDefinedScalarFunctions.java  |  31 ++
 .../plan/batch/sql/{ => agg}/HashAggregateTest.xml |   0
 .../plan/batch/sql/agg/OverWindowAggregateTest.xml | 542 +
 .../plan/batch/sql/{ => agg}/SortAggregateTest.xml |   0
 .../stream/sql/agg/OverWindowAggregateTest.xml | 474 ++
 .../batch/sql/{ => agg}/AggregateTestBase.scala|   2 +-
 .../batch/sql/{ => agg}/HashAggregateTest.scala|   2 +-
 .../batch/sql/agg/OverWindowAggregateTest.scala| 298 +++
 .../batch/sql/{ => agg}/SortAggregateTest.scala|   2 +-
 .../stream/sql/agg/OverWindowAggregateTest.scala   | 422 
 .../apache/flink/table/util/TableTestBase.scala|  11 +-
 26 files changed, 2573 insertions(+), 21 deletions(-)

diff --git 
a/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/api/PlannerConfigOptions.java
 
b/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/api/PlannerConfigOptions.java
index a0d6849..e20256c 100644
--- 
a/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/api/PlannerConfigOptions.java
+++ 
b/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/api/PlannerConfigOptions.java
@@ -60,8 +60,8 @@ public class PlannerConfigOptions {
 					"3. L and R shuffle by c1 and c2\n" +
 					"It can reduce some shuffle cost someTimes.");
 
-	public static final ConfigOption<Boolean> SQL_OPTIMIZER_SMJ_REMOVE_SORT_ENABLE =
-		key("sql.optimizer.smj.remove-sort.enable")
+	public static final ConfigOption<Boolean> SQL_OPTIMIZER_SMJ_REMOVE_SORT_ENABLED =
+		key("sql.optimizer.smj.remove-sort.enabled")
 			.defaultValue(false)
 			.withDescription("When true, the optimizer will try to remove redundant sort for SortMergeJoin. " +
 				"However that will increase optimization time. Default value is false.");
diff --git 
a/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/expressions/ExpressionBuilder.java
 
b/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/expressions/ExpressionBuilder.java
index 232e1d7..2069de1 100644
--- 
a/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/expressions/ExpressionBuilder.java
+++ 
b/flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/expressions/ExpressionBuilder.java
@@ -21,6 +21,7 @@ package org.apache.flink.table.expressions;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 
 import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.AND;
@@ -33,6 +34,7 @@ import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.IF;
 import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.IS_NULL;
 import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.LESS_THAN;
 import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.MINUS;
+import static org.apache.flink.table.expressions.BuiltInFunctionDefinitions.NOT;
 import static org.apache.flink.table.expressions.Bu

[flink] branch master updated: [FLINK-10712] Support state restore for RestartPipelinedRegionStrategy

2019-04-17 Thread srichter
This is an automated email from the ASF dual-hosted git repository.

srichter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new c3a9293  [FLINK-10712] Support state restore for 
RestartPipelinedRegionStrategy
c3a9293 is described below

commit c3a929346f9993d5177978053fc0bedd482faf7c
Author: Yun Tang 
AuthorDate: Wed Apr 17 19:48:40 2019 +0800

[FLINK-10712] Support state restore for RestartPipelinedRegionStrategy

This closes #7813.
---
 .../fs/RollingSinkFaultToleranceITCase.java|  12 +-
 .../BucketingSinkFaultToleranceITCase.java |  12 +-
 .../runtime/checkpoint/CheckpointCoordinator.java  |  15 +-
 .../flink/runtime/executiongraph/Execution.java|   2 -
 .../executiongraph/failover/FailoverRegion.java|  28 +-
 .../failover/RestartPipelinedRegionStrategy.java   |  18 +-
 ...ncurrentFailoverStrategyExecutionGraphTest.java |   4 +-
 .../runtime/executiongraph/FailoverRegionTest.java | 219 +++-
 .../ContinuousFileProcessingCheckpointITCase.java  |  23 +-
 .../test/checkpointing/RegionFailoverITCase.java   | 394 +
 .../StreamFaultToleranceTestBase.java  |  87 -
 11 files changed, 748 insertions(+), 66 deletions(-)

diff --git 
a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
 
b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
index 886055d..daa8884 100644
--- 
a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
+++ 
b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/RollingSinkFaultToleranceITCase.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 
@@ -77,8 +77,8 @@ public class RollingSinkFaultToleranceITCase extends 
StreamFaultToleranceTestBas
 
private static String outPath;
 
-   @BeforeClass
-   public static void createHDFS() throws IOException {
+   @Before
+   public void createHDFS() throws IOException {
Configuration conf = new Configuration();
 
File dataDir = tempFolder.newFolder();
@@ -94,8 +94,8 @@ public class RollingSinkFaultToleranceITCase extends 
StreamFaultToleranceTestBas
+ "/string-non-rolling-out";
}
 
-   @AfterClass
-   public static void destroyHDFS() {
+   @After
+   public void destroyHDFS() {
if (hdfsCluster != null) {
hdfsCluster.shutdown();
}
diff --git 
a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
 
b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
index dcb77bf..71b210d 100644
--- 
a/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
+++ 
b/flink-connectors/flink-connector-filesystem/src/test/java/org/apache/flink/streaming/connectors/fs/bucketing/BucketingSinkFaultToleranceITCase.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.rules.TemporaryFolder;
 
@@ -77,8 +77,8 @@ public class BucketingSinkFaultToleranceITCase extends 
StreamFaultToleranceTestB
 
private static String outPath;
 
-   @BeforeClass
-   public static void createHDFS() throws IOException {
+   @Before
+   public void createHDFS() throws IOException {
Configuration conf = new Configuration();
 
File dataDir = tempFolder.newFolder();
@@ -94,8 +94,8 @@ public class BucketingSinkFaultToleranceITCase extends 
StreamFaultToleranceTestB
+ "/string-non-rolling-out";
}
 
-   @AfterClass
-   public static void destroyHDFS() {
+   @After
+   public void destroyHDFS() {
if (hdfsCluster != null) {
hdfsCluster.shutdown();
  

[flink-shaded] branch master updated: Update note about transitive dependencies

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink-shaded.git


The following commit(s) were added to refs/heads/master by this push:
 new 2ef6ad9  Update note about transitive dependencies
2ef6ad9 is described below

commit 2ef6ad9ca9474487e39eb0db73fe0ce5f9f1b56e
Author: Chesnay Schepler 
AuthorDate: Wed Apr 17 12:41:08 2019 +0200

Update note about transitive dependencies
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 05d4bb2..6483eec 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ This repository contains a number of shaded dependencies for the [Apache Flink](
 
 The purpose of these dependencies is to provide a single instance of a shaded dependency in the Flink distribution, instead of each individual module shading the dependency.
 
-The shaded dependencies contained here do not expose any transitive dependencies. They may or may not be self-contained.
+With the exception of `flink-shaded-hadoop-2`, shaded dependencies contained here do not expose any transitive dependencies. They may or may not be self-contained.
 
 When using these dependencies it is recommended to work directly against the shaded namespaces.
 
@@ -35,4 +35,4 @@ However, it is possible to build these jars locally by cloning the repository an
 
 ## About
 
-Apache Flink is an open source project of [The Apache Software Foundation](https://apache.org/) (ASF).
\ No newline at end of file
+Apache Flink is an open source project of [The Apache Software Foundation](https://apache.org/) (ASF).



[flink] branch master updated: [hotfix][utils] Remove duplicate LeaderRetrievalUtils#getRecoverMode mode

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new a670b97  [hotfix][utils] Remove duplicate 
LeaderRetrievalUtils#getRecoverMode mode
a670b97 is described below

commit a670b9781e07ce56004eed887c754ae5633c4fd8
Author: Zili Chen 
AuthorDate: Wed Apr 17 17:59:36 2019 +0800

[hotfix][utils] Remove duplicate LeaderRetrievalUtils#getRecoverMode mode
---
 .../highavailability/HighAvailabilityServicesUtils.java |  5 ++---
 .../apache/flink/runtime/util/LeaderRetrievalUtils.java | 17 -
 2 files changed, 2 insertions(+), 20 deletions(-)

diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/HighAvailabilityServicesUtils.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/HighAvailabilityServicesUtils.java
index ee919c6..1263dc1 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/HighAvailabilityServicesUtils.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/highavailability/HighAvailabilityServicesUtils.java
@@ -34,7 +34,6 @@ import org.apache.flink.runtime.jobmaster.JobMaster;
 import org.apache.flink.runtime.net.SSLUtils;
 import org.apache.flink.runtime.resourcemanager.ResourceManager;
 import org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils;
-import org.apache.flink.runtime.util.LeaderRetrievalUtils;
 import org.apache.flink.runtime.util.ZooKeeperUtils;
 import org.apache.flink.util.ConfigurationException;
 import org.apache.flink.util.FlinkException;
@@ -52,7 +51,7 @@ public class HighAvailabilityServicesUtils {
 	public static HighAvailabilityServices createAvailableOrEmbeddedServices(
 		Configuration config,
 		Executor executor) throws Exception {
-		HighAvailabilityMode highAvailabilityMode = LeaderRetrievalUtils.getRecoveryMode(config);
+		HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(config);
 
 		switch (highAvailabilityMode) {
 			case NONE:
@@ -80,7 +79,7 @@ public class HighAvailabilityServicesUtils {
 		Executor executor,
 		AddressResolution addressResolution) throws Exception {
 
-		HighAvailabilityMode highAvailabilityMode = LeaderRetrievalUtils.getRecoveryMode(configuration);
+		HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(configuration);
 
 		switch (highAvailabilityMode) {
 			case NONE:
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java
index 69dfbf7..e0dca79 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/util/LeaderRetrievalUtils.java
@@ -19,11 +19,7 @@
 package org.apache.flink.runtime.util;
 
 import org.apache.flink.api.common.time.Time;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.configuration.HighAvailabilityOptions;
-import org.apache.flink.configuration.IllegalConfigurationException;
 import org.apache.flink.runtime.concurrent.FutureUtils;
-import org.apache.flink.runtime.jobmanager.HighAvailabilityMode;
 import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalException;
 import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalListener;
 import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService;
@@ -164,19 +160,6 @@ public class LeaderRetrievalUtils {
}
}
}
-
-	/**
-	 * Gets the recovery mode as configured, based on {@link HighAvailabilityOptions#HA_MODE}.
-	 *
-	 * @param config The configuration to read the recovery mode from.
-	 * @return The recovery mode.
-	 *
-	 * @throws IllegalConfigurationException Thrown, if the recovery mode does not correspond
-	 *                                       to a known value.
-	 */
-	public static HighAvailabilityMode getRecoveryMode(Configuration config) {
-		return HighAvailabilityMode.fromConfig(config);
-	}

// 
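
With the wrapper gone, callers read the HA mode directly from the configuration; a minimal sketch:

    Configuration config = new Configuration();
    config.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
    // Replaces the removed LeaderRetrievalUtils.getRecoveryMode(config).
    HighAvailabilityMode mode = HighAvailabilityMode.fromConfig(config);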





[flink] branch release-1.8 updated (302c26e -> 294e3a3)

2019-04-17 Thread pnowojski
This is an automated email from the ASF dual-hosted git repository.

pnowojski pushed a change to branch release-1.8
in repository https://gitbox.apache.org/repos/asf/flink.git.


from 302c26e  [hotfix][docs] Update process function example to use 
KeyedProcessFunction.
 new a4aeac0  [FLINK-10941][Network] Release partition after consumer end 
of reception confirmation
 new 294e3a3  [FLINK-10941][RM] Release task executor after all its result 
partitions are released

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../configuration/ResourceManagerOptions.java  |  22 +
 .../io/network/netty/PartitionRequestQueue.java|  22 ++---
 .../io/network/partition/ResultPartition.java  |  10 ++
 .../network/partition/ResultPartitionManager.java  |  11 +++
 .../ResourceManagerRuntimeServices.java|   3 +-
 .../resourcemanager/slotmanager/SlotManager.java   |  38 ++--
 .../slotmanager/SlotManagerConfiguration.java  |  15 ++-
 .../flink/runtime/taskexecutor/TaskExecutor.java   |   6 ++
 .../runtime/taskexecutor/TaskExecutorGateway.java  |   7 ++
 .../flink/runtime/taskexecutor/slot/TaskSlot.java  |   2 +-
 .../runtime/taskexecutor/slot/TaskSlotTable.java   |   2 +-
 .../ManuallyTriggeredScheduledExecutor.java|   7 ++
 .../io/network/partition/ResultPartitionTest.java  |   2 +
 .../resourcemanager/ResourceManagerHATest.java |   3 +-
 .../ResourceManagerJobMasterTest.java  |  10 +-
 .../ResourceManagerTaskExecutorTest.java   |  10 +-
 .../resourcemanager/ResourceManagerTest.java   |   9 +-
 .../slotmanager/SlotManagerBuilder.java|  78 +++
 .../slotmanager/SlotManagerTest.java   | 106 +
 .../slotmanager/SlotProtocolTest.java  |  17 ++--
 .../taskexecutor/TestingTaskExecutorGateway.java   |  12 ++-
 .../TestingTaskExecutorGatewayBuilder.java |  10 +-
 .../apache/flink/yarn/YarnResourceManagerTest.java |  10 +-
 23 files changed, 313 insertions(+), 99 deletions(-)
 create mode 100644 flink-runtime/src/test/java/org/apache/flink/runtime/resourcemanager/slotmanager/SlotManagerBuilder.java
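To make the release ordering these two commits establish concrete, here is a hypothetical sketch (class and method names invented for illustration; this is not Flink's actual API): the resource manager only returns a task executor's resources once release has been requested and every result partition it produced has been confirmed released by its consumers.

    import java.util.HashSet;
    import java.util.Set;

    final class TaskExecutorReleaseGuard {
        private final Set<String> unreleasedPartitions = new HashSet<>();
        private boolean releaseRequested;

        void onPartitionCreated(String partitionId) {
            unreleasedPartitions.add(partitionId);
        }

        // Called when a consumer confirms it has fully received a partition.
        void onPartitionReleased(String partitionId) {
            unreleasedPartitions.remove(partitionId);
            maybeRelease();
        }

        void requestRelease() {
            releaseRequested = true;
            maybeRelease();
        }

        private void maybeRelease() {
            if (releaseRequested && unreleasedPartitions.isEmpty()) {
                // Safe point: no consumer still depends on this executor's data.
            }
        }
    }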



[flink] branch master updated: [FLINK-11923][metrics] Move reporter setup out of MetricRegistry

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 6f78a94  [FLINK-11923][metrics] Move reporter setup out of MetricRegistry
6f78a94 is described below

commit 6f78a943ed626e4c232e9a706b9debf594c67ebd
Author: Chesnay Schepler 
AuthorDate: Wed Apr 17 09:59:31 2019 +0200

[FLINK-11923][metrics] Move reporter setup out of MetricRegistry
---
 .../ScheduledDropwizardReporterTest.java   |  11 +-
 .../DropwizardFlinkHistogramWrapperTest.java   |  15 +-
 .../metrics/influxdb/InfluxdbReporterTest.java |  23 +--
 .../apache/flink/metrics/jmx/JMXReporterTest.java  |  50 +++--
 .../PrometheusReporterTaskScopeTest.java   |   7 +-
 .../metrics/prometheus/PrometheusReporterTest.java |  49 ++---
 .../flink/metrics/slf4j/Slf4jReporterTest.java |  10 +-
 .../flink/metrics/statsd/StatsDReporterTest.java   |  36 ++--
 .../runtime/entrypoint/ClusterEntrypoint.java  |   5 +-
 .../metrics/MetricRegistryConfiguration.java   |  75 +--
 .../flink/runtime/metrics/MetricRegistryImpl.java  |  43 ++--
 .../flink/runtime/metrics/ReporterSetup.java   | 176 +
 .../flink/runtime/minicluster/MiniCluster.java |   5 +-
 .../runtime/taskexecutor/TaskManagerRunner.java|   5 +-
 .../metrics/MetricRegistryConfigurationTest.java   | 138 -
 .../runtime/metrics/MetricRegistryImplTest.java| 134 +
 .../flink/runtime/metrics/ReporterSetupTest.java   | 217 +
 .../metrics/groups/AbstractMetricGroupTest.java|  30 ++-
 .../groups/MetricGroupRegistrationTest.java|  11 +-
 19 files changed, 607 insertions(+), 433 deletions(-)

diff --git a/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/ScheduledDropwizardReporterTest.java b/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/ScheduledDropwizardReporterTest.java
index b69b8d8..02d2c79 100644
--- a/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/ScheduledDropwizardReporterTest.java
+++ b/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/ScheduledDropwizardReporterTest.java
@@ -19,7 +19,6 @@
 package org.apache.flink.dropwizard;
 
 import org.apache.flink.api.common.JobID;
-import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.configuration.MetricOptions;
 import org.apache.flink.dropwizard.metrics.DropwizardMeterWrapper;
@@ -36,6 +35,7 @@ import org.apache.flink.metrics.reporter.MetricReporter;
 import org.apache.flink.runtime.jobgraph.JobVertexID;
 import org.apache.flink.runtime.metrics.MetricRegistryConfiguration;
 import org.apache.flink.runtime.metrics.MetricRegistryImpl;
+import org.apache.flink.runtime.metrics.ReporterSetup;
 import org.apache.flink.runtime.metrics.groups.TaskManagerJobMetricGroup;
 import org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup;
 import org.apache.flink.runtime.metrics.groups.TaskMetricGroup;
@@ -45,6 +45,7 @@ import com.codahale.metrics.ScheduledReporter;
 import org.junit.Test;
 
 import java.lang.reflect.InvocationTargetException;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -82,16 +83,14 @@ public class ScheduledDropwizardReporterTest {
String taskManagerId = "tas:kMana::ger";
String counterName = "testCounter";
 
-   configuration.setString(
-   ConfigConstants.METRICS_REPORTER_PREFIX + "test." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX,
-   "org.apache.flink.dropwizard.ScheduledDropwizardReporterTest$TestingScheduledDropwizardReporter");
-
configuration.setString(MetricOptions.SCOPE_NAMING_TASK, "..");
configuration.setString(MetricOptions.SCOPE_DELIMITER, "_");

MetricRegistryConfiguration metricRegistryConfiguration = MetricRegistryConfiguration.fromConfiguration(configuration);

-   MetricRegistryImpl metricRegistry = new MetricRegistryImpl(metricRegistryConfiguration);
+   MetricRegistryImpl metricRegistry = new MetricRegistryImpl(
+   metricRegistryConfiguration,
+   Collections.singletonList(ReporterSetup.forReporter("test", new TestingScheduledDropwizardReporter())));
 
char delimiter = metricRegistry.getDelimiter();
 
diff --git a/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/metrics/DropwizardFlinkHistogramWrapperTest.java b/flink-metrics/flink-metrics-dropwizard/src/test/java/org/apache/flink/dropwizard/metrics/DropwizardFlinkHistogramWrapperTest.java
index d23a22c..feec0f8 100644
--- a/flink-metrics
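The same rewrite repeats across every reporter test in this commit: instead of registering reporters through configuration strings, tests pass ReporterSetup instances straight into the MetricRegistryImpl constructor. A minimal sketch of the new wiring, using only calls visible in the diff above (MyReporter is a placeholder for any MetricReporter implementation):

    import java.util.Collections;

    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.runtime.metrics.MetricRegistryConfiguration;
    import org.apache.flink.runtime.metrics.MetricRegistryImpl;
    import org.apache.flink.runtime.metrics.ReporterSetup;

    public class ReporterWiringSketch {
        public static void main(String[] args) {
            // Register a single reporter named "my_reporter" without configuration keys.
            // MyReporter stands in for any MetricReporter implementation.
            MetricRegistryImpl registry = new MetricRegistryImpl(
                MetricRegistryConfiguration.fromConfiguration(new Configuration()),
                Collections.singletonList(ReporterSetup.forReporter("my_reporter", new MyReporter())));

            registry.shutdown(); // the registry owns the reporter lifecycles
        }
    }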

[flink] branch master updated: [FLINK-12102][tests] Fix FlinkILoopTest on Java 9

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 9438147  [FLINK-12102][tests] Fix FlinkILoopTest on Java 9
9438147 is described below

commit 943814706a192a9528ab1ecaa19fa7dc32721438
Author: Chesnay Schepler 
AuthorDate: Wed Apr 10 11:04:38 2019 +0200

[FLINK-12102][tests] Fix FlinkILoopTest on Java 9
---
 .../src/test/java/org/apache/flink/api/java/FlinkILoopTest.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/flink-scala-shell/src/test/java/org/apache/flink/api/java/FlinkILoopTest.java b/flink-scala-shell/src/test/java/org/apache/flink/api/java/FlinkILoopTest.java
index 5f772f6..ca93abd 100644
--- a/flink-scala-shell/src/test/java/org/apache/flink/api/java/FlinkILoopTest.java
+++ b/flink-scala-shell/src/test/java/org/apache/flink/api/java/FlinkILoopTest.java
@@ -35,6 +35,7 @@ import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
@@ -52,6 +53,7 @@ import static org.junit.Assert.assertTrue;
  */
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(PlanExecutor.class)
+@PowerMockIgnore("javax.tools.*")
 public class FlinkILoopTest extends TestLogger {
 
@Test
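Background on the one-line fix: PowerMock loads test classes through its own classloader, which on Java 9 can break loading of javax.tools classes. @PowerMockIgnore tells PowerMock to defer the listed packages to the system classloader instead. The general pattern, sketched (SomeClass stands in for whatever the test prepares for mocking):

    import org.junit.runner.RunWith;
    import org.powermock.core.classloader.annotations.PowerMockIgnore;
    import org.powermock.core.classloader.annotations.PrepareForTest;
    import org.powermock.modules.junit4.PowerMockRunner;

    @RunWith(PowerMockRunner.class)
    @PrepareForTest(SomeClass.class)       // SomeClass is a placeholder
    @PowerMockIgnore("javax.tools.*")      // leave javax.tools to the system classloader
    public class SomeClassTest {
        // test methods as usual
    }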



[flink] branch master updated: [FLINK-12117][cassandra] Disable tests on Java 9

2019-04-17 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new dd4566d  [FLINK-12117][cassandra] Disable tests on Java 9
dd4566d is described below

commit dd4566de0c935afae9b9064321359b9018319d0f
Author: Chesnay Schepler 
AuthorDate: Fri Apr 5 12:56:44 2019 +0200

[FLINK-12117][cassandra] Disable tests on Java 9
---
 flink-connectors/flink-connector-cassandra/pom.xml | 21 +
 tools/travis/stage.sh  |  1 -
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/flink-connectors/flink-connector-cassandra/pom.xml b/flink-connectors/flink-connector-cassandra/pom.xml
index f22e945..0218d6e 100644
--- a/flink-connectors/flink-connector-cassandra/pom.xml
+++ b/flink-connectors/flink-connector-cassandra/pom.xml
@@ -41,6 +41,27 @@ under the License.
3.0.0

 
+   <profiles>
+       <profile>
+           <id>java9</id>
+           <activation>
+               <jdk>9</jdk>
+           </activation>
+           <build>
+               <plugins>
+                   <plugin>
+                       <groupId>org.apache.maven.plugins</groupId>
+                       <artifactId>maven-surefire-plugin</artifactId>
+                       <configuration>
+                           <!-- … -->
+                           <skipTests>true</skipTests>
+                       </configuration>
+                   </plugin>
+               </plugins>
+           </build>
+       </profile>
+   </profiles>
+
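(Note: the profile keys its activation off the running JDK, so a plain "mvn test" under Java 9 picks it up automatically and surefire skips the Cassandra tests; "mvn -Pjava9 test" would force the same behavior on other JDKs. The stage.sh change below drops the module from the Travis JDK 9 exclusion list accordingly.)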



diff --git a/tools/travis/stage.sh b/tools/travis/stage.sh
index 95ba1ff..be2c573 100644
--- a/tools/travis/stage.sh
+++ b/tools/travis/stage.sh
@@ -99,7 +99,6 @@ MODULES_CONNECTORS_JDK9_EXCLUSIONS="\
 !flink-filesystems/flink-s3-fs-presto,\
 !flink-formats/flink-avro,\
 !flink-connectors/flink-hbase,\
-!flink-connectors/flink-connector-cassandra,\
 !flink-connectors/flink-connector-kafka-0.9,\
 !flink-connectors/flink-connector-kafka-0.10,\
 !flink-connectors/flink-connector-kafka-0.11"



[flink] branch master updated: [FLINK-12217][table] OperationTreeBuilder.map() should perform ExpressionResolver.resolve()

2019-04-17 Thread dwysakowicz
This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 7fba616  [FLINK-12217][table] OperationTreeBuilder.map() should perform ExpressionResolver.resolve()
7fba616 is described below

commit 7fba616857a6a71c6a818c008e59af94bc933375
Author: hequn8128 
AuthorDate: Wed Apr 17 12:29:10 2019 +0800

[FLINK-12217][table] OperationTreeBuilder.map() should perform ExpressionResolver.resolve()
---
 .../flink/table/operations/OperationTreeBuilder.scala   |  7 +--
 .../stream/table/stringexpr/CalcStringExpressionTest.scala  | 13 +
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/operations/OperationTreeBuilder.scala b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/operations/OperationTreeBuilder.scala
index 1579eb4..c6fec01 100644
--- a/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/operations/OperationTreeBuilder.scala
+++ b/flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/operations/OperationTreeBuilder.scala
@@ -348,12 +348,15 @@ class OperationTreeBuilder(private val tableEnv: TableEnvironment) {
 
   def map(mapFunction: Expression, child: TableOperation): TableOperation = {
 
-if (!isScalarFunction(mapFunction)) {
+val resolver = resolverFor(tableCatalog, functionCatalog, child).build()
+val resolvedMapFunction = resolveSingleExpression(mapFunction, resolver)
+
+if (!isScalarFunction(resolvedMapFunction)) {
   throw new ValidationException("Only ScalarFunction can be used in the map operator.")
 }
 
 val expandedFields = new CallExpression(BuiltInFunctionDefinitions.FLATTEN,
-  List(mapFunction).asJava)
+  List(resolvedMapFunction).asJava)
 project(Collections.singletonList(expandedFields), child)
   }
 
diff --git a/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/CalcStringExpressionTest.scala b/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/CalcStringExpressionTest.scala
index 0fd5528..999c81b 100644
--- a/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/CalcStringExpressionTest.scala
+++ b/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/CalcStringExpressionTest.scala
@@ -21,6 +21,7 @@ package org.apache.flink.table.api.stream.table.stringexpr
 import org.apache.flink.api.scala._
 import org.apache.flink.table.api.scala._
 import org.apache.flink.table.expressions.Literal
+import org.apache.flink.table.expressions.utils.Func23
 import org.apache.flink.table.utils.TableTestBase
 import org.junit.Test
 
@@ -167,4 +168,16 @@ class CalcStringExpressionTest extends TableTestBase {
 
 verifyTableEquals(t1, t2)
   }
+
+  @Test
+  def testMap(): Unit = {
+val util = streamTestUtil()
+val t = util.addTable[(Int, Long, String)]("Table3", 'a, 'b, 'c)
+util.tableEnv.registerFunction("func", Func23)
+
+val t1 = t.map("func(a, b, c)")
+val t2 = t.map(Func23('a, 'b, 'c))
+
+verifyTableEquals(t1, t2)
+  }
 }
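(Why resolution must happen first: a string expression such as "func(a, b, c)" is parsed into an unresolved call, so the old isScalarFunction check rejected it before the ExpressionResolver could look "func" up in the function catalog. Resolving the expression first makes the string and Scala-DSL variants of map() behave identically, which is exactly what the new testMap case verifies.)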