[flink] branch master updated: [FLINK-27219][sql-client] Print exception stack when getting errors

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 3c5d3bacda5 [FLINK-27219][sql-client] Print exception stack when getting
errors
3c5d3bacda5 is described below

commit 3c5d3bacda580de1dc4986909a0aa5fc30e37885
Author: Shengkai <1059623...@qq.com>
AuthorDate: Mon May 23 19:40:04 2022 +0800

[FLINK-27219][sql-client] Print exception stack when getting errors

This closes #19796.
---
 flink-table/flink-sql-client/src/test/resources/sql/set.q | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/flink-table/flink-sql-client/src/test/resources/sql/set.q 
b/flink-table/flink-sql-client/src/test/resources/sql/set.q
index 5768953d401..a2d01e005f8 100644
--- a/flink-table/flink-sql-client/src/test/resources/sql/set.q
+++ b/flink-table/flink-sql-client/src/test/resources/sql/set.q
@@ -116,6 +116,10 @@ Was expecting one of:
 
 !error
 
+set 'sql-client.verbose' = 'true';
+[INFO] Session property has been set.
+!info
+
 set;
 'execution.attached' = 'true'
 'execution.savepoint-restore-mode' = 'NO_CLAIM'
@@ -126,6 +130,7 @@ set;
 'pipeline.classpaths' = ''
 'pipeline.jars' = ''
 'rest.port' = '$VAR_REST_PORT'
+'sql-client.verbose' = 'true'
 'table.exec.legacy-cast-behaviour' = 'DISABLED'
 !ok
 
@@ -147,6 +152,7 @@ set;
 'pipeline.classpaths' = ''
 'pipeline.jars' = ''
 'rest.port' = '$VAR_REST_PORT'
+'sql-client.verbose' = 'true'
 'table.exec.legacy-cast-behaviour' = 'DISABLED'
 !ok
 
@@ -169,6 +175,7 @@ set;
 'pipeline.classpaths' = ''
 'pipeline.jars' = '$VAR_PIPELINE_JARS_URL'
 'rest.port' = '$VAR_REST_PORT'
+'sql-client.verbose' = 'true'
 'table.exec.legacy-cast-behaviour' = 'DISABLED'
 !ok
 



[flink] branch release-1.14 updated: [FLINK-27733][python] Rework on_timer output behind watermark bug fix

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch release-1.14
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.14 by this push:
 new 945c15341b9 [FLINK-27733][python] Rework on_timer output behind 
watermark bug fix
945c15341b9 is described below

commit 945c15341b93a9bfadc7b6ce239a96c2b7baf592
Author: Juntao Hu 
AuthorDate: Sun May 22 23:16:12 2022 +0800

[FLINK-27733][python] Rework on_timer output behind watermark bug fix

This closes #19788.
---
 .../python/AbstractPythonFunctionOperator.java | 28 +-
 .../python/PythonKeyedCoProcessOperator.java   | 33 --
 .../python/PythonKeyedProcessOperator.java | 33 --
 .../operators/python/timer/TimerRegistration.java  | 16 ---
 .../api/operators/python/timer/TimerUtils.java | 30 
 ...thonStreamGroupWindowAggregateOperatorTest.java |  2 --
 ...onGroupWindowAggregateFunctionOperatorTest.java | 19 +++--
 ...ArrowPythonRowTimeBoundedRangeOperatorTest.java |  6 ++--
 ...mArrowPythonRowTimeBoundedRowsOperatorTest.java |  6 ++--
 9 files changed, 49 insertions(+), 124 deletions(-)

diff --git 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
index 2c04bc6ef78..98b85f6db9f 100644
--- 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
+++ 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
@@ -31,6 +31,7 @@ import org.apache.flink.python.metric.FlinkMetricContainer;
 import org.apache.flink.runtime.state.KeyedStateBackend;
 import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
 import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.operators.InternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionInternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionKeyedStateBackend;
 import 
org.apache.flink.streaming.api.runners.python.beam.BeamPythonFunctionRunner;
@@ -218,14 +219,18 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 // Approach 1) is the easiest and gives better latency, yet 2)
 // gives better throughput due to the bundle not getting cut on
 // every watermark. So we have implemented 2) below.
+
+// advance the watermark and do not emit watermark to downstream 
operators
+if (getTimeServiceManager().isPresent()) {
+getTimeServiceManager().get().advanceWatermark(mark);
+}
+
 if (mark.getTimestamp() == Long.MAX_VALUE) {
 invokeFinishBundle();
 processElementsOfCurrentKeyIfNeeded(null);
-preEmitWatermark(mark);
+advanceWatermark(mark);
 output.emitWatermark(mark);
 } else if (isBundleFinished()) {
-// forward the watermark immediately if the bundle is already 
finished.
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } else {
 // It is not safe to advance the output watermark yet, so add a 
hold on the current
@@ -233,8 +238,8 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 bundleFinishedCallback =
 () -> {
 try {
+advanceWatermark(mark);
 // at this point the bundle is finished, allow the 
watermark to pass
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } catch (Exception e) {
 throw new RuntimeException(
@@ -318,10 +323,19 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 }
 }
 
-/** Called before emitting watermark to downstream. */
-protected void preEmitWatermark(Watermark mark) throws Exception {
+/**
+ * Advances the watermark of all managed timer services, potentially 
firing event time timers.
+ * It also ensures that the fired timers are processed in the Python 
user-defined functions.
+ */
+private void advanceWatermark(Watermark watermark) throws Exception {
 if (getTimeServiceManager().isPresent()) {
-getTimeServiceManager().get().advanceWatermark(mark);
+InternalTimeServiceManager timeServiceManager = 
getTimeServiceManager().get();
+timeServiceManager.advanceWatermark(watermark);
+
+while (!isBundleFinished()) {
+

[flink] branch release-1.15 updated: [FLINK-27733][python] Rework on_timer output behind watermark bug fix

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch release-1.15
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/release-1.15 by this push:
 new f413c40c8ab [FLINK-27733][python] Rework on_timer output behind 
watermark bug fix
f413c40c8ab is described below

commit f413c40c8ab8145d3bdea8dbc6372961a598be37
Author: Juntao Hu 
AuthorDate: Sun May 22 23:16:12 2022 +0800

[FLINK-27733][python] Rework on_timer output behind watermark bug fix

This closes #19788.
---
 .../python/AbstractPythonFunctionOperator.java | 28 +-
 .../python/PythonKeyedCoProcessOperator.java   | 33 --
 .../python/PythonKeyedProcessOperator.java | 33 --
 .../operators/python/timer/TimerRegistration.java  | 16 ---
 .../api/operators/python/timer/TimerUtils.java | 30 
 ...thonStreamGroupWindowAggregateOperatorTest.java |  2 --
 ...onGroupWindowAggregateFunctionOperatorTest.java | 19 +++--
 ...ArrowPythonRowTimeBoundedRangeOperatorTest.java |  6 ++--
 ...mArrowPythonRowTimeBoundedRowsOperatorTest.java |  6 ++--
 9 files changed, 49 insertions(+), 124 deletions(-)

diff --git 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
index f229ea7023c..5324df04f8a 100644
--- 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
+++ 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
@@ -25,6 +25,7 @@ import org.apache.flink.python.metric.FlinkMetricContainer;
 import org.apache.flink.runtime.state.KeyedStateBackend;
 import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
 import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.operators.InternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionInternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionKeyedStateBackend;
 import org.apache.flink.streaming.api.watermark.Watermark;
@@ -180,14 +181,18 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 // Approach 1) is the easiest and gives better latency, yet 2)
 // gives better throughput due to the bundle not getting cut on
 // every watermark. So we have implemented 2) below.
+
+// advance the watermark and do not emit watermark to downstream 
operators
+if (getTimeServiceManager().isPresent()) {
+getTimeServiceManager().get().advanceWatermark(mark);
+}
+
 if (mark.getTimestamp() == Long.MAX_VALUE) {
 invokeFinishBundle();
 processElementsOfCurrentKeyIfNeeded(null);
-preEmitWatermark(mark);
+advanceWatermark(mark);
 output.emitWatermark(mark);
 } else if (isBundleFinished()) {
-// forward the watermark immediately if the bundle is already 
finished.
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } else {
 // It is not safe to advance the output watermark yet, so add a 
hold on the current
@@ -195,8 +200,8 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 bundleFinishedCallback =
 () -> {
 try {
+advanceWatermark(mark);
 // at this point the bundle is finished, allow the 
watermark to pass
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } catch (Exception e) {
 throw new RuntimeException(
@@ -263,10 +268,19 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 
 protected abstract PythonEnvironmentManager 
createPythonEnvironmentManager();
 
-/** Called before emitting watermark to downstream. */
-protected void preEmitWatermark(Watermark mark) throws Exception {
+/**
+ * Advances the watermark of all managed timer services, potentially 
firing event time timers.
+ * It also ensures that the fired timers are processed in the Python 
user-defined functions.
+ */
+private void advanceWatermark(Watermark watermark) throws Exception {
 if (getTimeServiceManager().isPresent()) {
-getTimeServiceManager().get().advanceWatermark(mark);
+InternalTimeServiceManager timeServiceManager = 
getTimeServiceManager().get();
+timeServiceManager.advanceWatermark(watermark);
+
+while 

[flink] branch master updated: [FLINK-27733][python] Rework on_timer output behind watermark bug fix

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new a0ef9eb46ad [FLINK-27733][python] Rework on_timer output behind 
watermark bug fix
a0ef9eb46ad is described below

commit a0ef9eb46ad3896d6d87595dbe364f69d583794c
Author: Juntao Hu 
AuthorDate: Sun May 22 23:16:12 2022 +0800

[FLINK-27733][python] Rework on_timer output behind watermark bug fix

This closes #19788.
---
 .../python/AbstractPythonFunctionOperator.java | 28 +-
 .../python/PythonKeyedCoProcessOperator.java   | 33 --
 .../python/PythonKeyedProcessOperator.java | 33 --
 .../operators/python/timer/TimerRegistration.java  | 16 ---
 .../api/operators/python/timer/TimerUtils.java | 30 
 ...thonStreamGroupWindowAggregateOperatorTest.java |  2 --
 ...onGroupWindowAggregateFunctionOperatorTest.java | 19 +++--
 ...ArrowPythonRowTimeBoundedRangeOperatorTest.java |  6 ++--
 ...mArrowPythonRowTimeBoundedRowsOperatorTest.java |  6 ++--
 9 files changed, 49 insertions(+), 124 deletions(-)

diff --git 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
index f229ea7023c..5324df04f8a 100644
--- 
a/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
+++ 
b/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
@@ -25,6 +25,7 @@ import org.apache.flink.python.metric.FlinkMetricContainer;
 import org.apache.flink.runtime.state.KeyedStateBackend;
 import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
 import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.operators.InternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionInternalTimeServiceManager;
 import 
org.apache.flink.streaming.api.operators.sorted.state.BatchExecutionKeyedStateBackend;
 import org.apache.flink.streaming.api.watermark.Watermark;
@@ -180,14 +181,18 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 // Approach 1) is the easiest and gives better latency, yet 2)
 // gives better throughput due to the bundle not getting cut on
 // every watermark. So we have implemented 2) below.
+
+// advance the watermark and do not emit watermark to downstream 
operators
+if (getTimeServiceManager().isPresent()) {
+getTimeServiceManager().get().advanceWatermark(mark);
+}
+
 if (mark.getTimestamp() == Long.MAX_VALUE) {
 invokeFinishBundle();
 processElementsOfCurrentKeyIfNeeded(null);
-preEmitWatermark(mark);
+advanceWatermark(mark);
 output.emitWatermark(mark);
 } else if (isBundleFinished()) {
-// forward the watermark immediately if the bundle is already 
finished.
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } else {
 // It is not safe to advance the output watermark yet, so add a 
hold on the current
@@ -195,8 +200,8 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 bundleFinishedCallback =
 () -> {
 try {
+advanceWatermark(mark);
 // at this point the bundle is finished, allow the 
watermark to pass
-preEmitWatermark(mark);
 output.emitWatermark(mark);
 } catch (Exception e) {
 throw new RuntimeException(
@@ -263,10 +268,19 @@ public abstract class AbstractPythonFunctionOperator 
extends AbstractStream
 
 protected abstract PythonEnvironmentManager 
createPythonEnvironmentManager();
 
-/** Called before emitting watermark to downstream. */
-protected void preEmitWatermark(Watermark mark) throws Exception {
+/**
+ * Advances the watermark of all managed timer services, potentially 
firing event time timers.
+ * It also ensures that the fired timers are processed in the Python 
user-defined functions.
+ */
+private void advanceWatermark(Watermark watermark) throws Exception {
 if (getTimeServiceManager().isPresent()) {
-getTimeServiceManager().get().advanceWatermark(mark);
+InternalTimeServiceManager timeServiceManager = 
getTimeServiceManager().get();
+timeServiceManager.advanceWatermark(watermark);
+
+while (!isBundleFinished()) 

[flink] branch master updated: Revert "[FLINK-25188][python][build] Support m1 chip."

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 99c74d5b430 Revert "[FLINK-25188][python][build] Support m1 chip."
99c74d5b430 is described below

commit 99c74d5b4301436fbaf597dc24ff852428243a8d
Author: huangxingbo 
AuthorDate: Tue May 24 09:57:40 2022 +0800

Revert "[FLINK-25188][python][build] Support m1 chip."

This reverts commit 7e9be789
---
 NOTICE |   2 +-
 flink-python/dev/dev-requirements.txt  |  13 ++--
 flink-python/dev/lint-python.sh|   6 --
 flink-python/lib/cloudpickle-1.2.2-src.zip | Bin 0 -> 23105 bytes
 flink-python/lib/cloudpickle-2.0.0-src.zip | Bin 24548 -> 0 bytes
 flink-python/pom.xml   |  19 ++
 .../pyflink/fn_execution/beam/beam_operations.py   |   8 +--
 .../pyflink/table/tests/test_dependency.py |   2 +-
 flink-python/setup.py  |  10 +--
 .../fnexecution/state/GrpcStateService.java|   6 +-
 .../io/grpc/internal/SharedResourceHolder.java |   4 +-
 .../beam/BeamDataStreamPythonFunctionRunner.java   |   4 +-
 .../python/beam/BeamPythonFunctionRunner.java  |   8 +--
 .../python/beam/state/BeamBagStateHandler.java |   2 +-
 .../python/beam/state/BeamMapStateHandler.java |   2 +-
 .../python/beam/state/BeamStateRequestHandler.java |   4 +-
 .../flink/streaming/api/utils/ProtoUtils.java  |   2 +-
 .../python/beam/BeamTablePythonFunctionRunner.java |   2 +-
 flink-python/src/main/resources/META-INF/NOTICE|  75 ++---
 .../PassThroughPythonAggregateFunctionRunner.java  |   2 +-
 .../PassThroughPythonScalarFunctionRunner.java |   2 +-
 .../PassThroughPythonTableFunctionRunner.java  |   2 +-
 ...ThroughStreamAggregatePythonFunctionRunner.java |   2 +-
 ...amGroupWindowAggregatePythonFunctionRunner.java |   2 +-
 ...ghStreamTableAggregatePythonFunctionRunner.java |   2 +-
 pom.xml|   2 +-
 tools/releasing/NOTICE-binary_PREAMBLE.txt |   2 +-
 27 files changed, 83 insertions(+), 102 deletions(-)

diff --git a/NOTICE b/NOTICE
index 98237e26c68..759a9f65adb 100644
--- a/NOTICE
+++ b/NOTICE
@@ -17,7 +17,7 @@ See bundled license files for details.
 This project bundles the following dependencies under the BSD license.
 See bundled license files for details.
 
-- cloudpickle:2.0.0
+- cloudpickle:1.2.2
 - net.sf.py4j:py4j:0.10.9.3
 
 This project bundles the following dependencies under SIL OFL 1.1 license 
(https://opensource.org/licenses/OFL-1.1).
diff --git a/flink-python/dev/dev-requirements.txt 
b/flink-python/dev/dev-requirements.txt
index e5bad6014cc..b061a67a20d 100755
--- a/flink-python/dev/dev-requirements.txt
+++ b/flink-python/dev/dev-requirements.txt
@@ -14,18 +14,17 @@
 # limitations under the License.
 setuptools>=18.0
 wheel
-apache-beam==2.38.0
+apache-beam==2.27.0
 cython==0.29.24
 py4j==0.10.9.3
 python-dateutil==2.8.0
-cloudpickle==2.0.0
+cloudpickle==1.2.2
 avro-python3>=1.8.1,!=1.9.2,<1.10.0
-pandas>=1.3.0
-pyarrow>=5.0.0
+pandas>=1.0,<1.2.0
+pyarrow>=0.15.1,<3.0.0
 pytz>=2018.3
-numpy>=1.21.4
+numpy>=1.14.3,<1.20
 fastavro>=0.21.4,<0.24
 grpcio>=1.29.0,<2
 grpcio-tools>=1.3.5,<=1.14.2
-pemja==0.1.5; python_version >= '3.7'
-httplib2>=0.8,<0.19.0
+pemja==0.1.4; python_version >= '3.7'
diff --git a/flink-python/dev/lint-python.sh b/flink-python/dev/lint-python.sh
index 630a0505cf4..09d986fb7f7 100755
--- a/flink-python/dev/lint-python.sh
+++ b/flink-python/dev/lint-python.sh
@@ -209,12 +209,6 @@ function install_miniconda() {
 if [ ! -d "$CURRENT_DIR/.conda" ]; then
 print_function "STEP" "installing conda..."
 $CONDA_INSTALL_SH -b -p $CURRENT_DIR/.conda 2>&1 >/dev/null
-
-# orjson depend on pip >= 20.3
-print_function "STEP" "upgrade pip..."
-$CURRENT_DIR/.conda/bin/python -m pip install --upgrade pip 2>&1 
>/dev/null
-print_function "STEP" "upgrade pip... [SUCCESS]"
-
 if [ $? -ne 0 ]; then
 echo "install miniconda failed"
 exit $CONDA_INSTALL_STATUS
diff --git a/flink-python/lib/cloudpickle-1.2.2-src.zip 
b/flink-python/lib/cloudpickle-1.2.2-src.zip
new file mode 100644
index 000..4d73a881f5e
Binary files /dev/null and b/flink-python/lib/cloudpickle-1.2.2-src.zip differ
diff --git a/flink-python/lib/cloudpickle-2.0.0-src.zip 
b/flink-python/lib/cloudpickle-2.0.0-src.zip
deleted file mode 100644
index ed416829018..000
Binary files a/flink-python/lib/cloudpickle-2.0.0-src.zip and /dev/null differ
diff --git a/flink-python/pom.xml b/flink-python/pom.xml
index 3e62acf94c6..2783ecd70da 100644
--- a/flink-python/pom.xml
+++ b/flink-python/pom.xml
@@ -34,7 +34,7 @@ under the License.
jar
 

- 

[flink-kubernetes-operator] branch main updated: [hotfix] Make image tag in helm chart values as an explicit string

2022-05-23 Thread gyfora
This is an automated email from the ASF dual-hosted git repository.

gyfora pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-kubernetes-operator.git


The following commit(s) were added to refs/heads/main by this push:
 new 1d1cf5a  [hotfix] Make image tag in helm chart values as an explicit 
string
1d1cf5a is described below

commit 1d1cf5a440c23439e8c450340727ec9892cc165f
Author: wangyang0918 
AuthorDate: Mon May 23 17:23:49 2022 +0800

[hotfix] Make image tag in helm chart values as an explicit string
---
 tools/releasing/create_source_release.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/releasing/create_source_release.sh 
b/tools/releasing/create_source_release.sh
index 7d306c1..c638e32 100755
--- a/tools/releasing/create_source_release.sh
+++ b/tools/releasing/create_source_release.sh
@@ -100,7 +100,7 @@ cd ${CLONE_DIR}
 
 # TODO: We might want to be more specific here later on what to replace
 perl -pi -e "s#^  repository: .*#  repository: 
ghcr.io/apache/flink-kubernetes-operator#" 
flink-kubernetes-operator-${RELEASE_VERSION}/helm/flink-kubernetes-operator/values.yaml
-perl -pi -e "s#^  tag: .*#  tag: ${commit_hash}#" 
flink-kubernetes-operator-${RELEASE_VERSION}/helm/flink-kubernetes-operator/values.yaml
+perl -pi -e "s#^  tag: .*#  tag: \"${commit_hash}\"#" 
flink-kubernetes-operator-${RELEASE_VERSION}/helm/flink-kubernetes-operator/values.yaml
 
 helm package --app-version ${RELEASE_VERSION} --version ${RELEASE_VERSION} 
--destination ${RELEASE_DIR} 
flink-kubernetes-operator-${RELEASE_VERSION}/helm/flink-kubernetes-operator
 mv ${RELEASE_DIR}/flink-kubernetes-operator-${RELEASE_VERSION}.tgz 
${RELEASE_DIR}/flink-kubernetes-operator-${RELEASE_VERSION}-helm.tgz



[flink] branch master updated: [FLINK-25188][python][build] Support m1 chip.

2022-05-23 Thread hxb
This is an automated email from the ASF dual-hosted git repository.

hxb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 7e9be78974f [FLINK-25188][python][build] Support m1 chip.
7e9be78974f is described below

commit 7e9be78974f95e4eb3c8bb442564c81ea61c563e
Author: Ada Wong 
AuthorDate: Tue Jan 18 16:06:11 2022 +0800

[FLINK-25188][python][build] Support m1 chip.

This closes #18769.
---
 NOTICE |   2 +-
 flink-python/dev/dev-requirements.txt  |  13 ++--
 flink-python/dev/lint-python.sh|   6 ++
 flink-python/lib/cloudpickle-1.2.2-src.zip | Bin 23105 -> 0 bytes
 flink-python/lib/cloudpickle-2.0.0-src.zip | Bin 0 -> 24548 bytes
 flink-python/pom.xml   |  19 --
 .../pyflink/fn_execution/beam/beam_operations.py   |   8 ++-
 .../pyflink/table/tests/test_dependency.py |   2 +-
 flink-python/setup.py  |  10 +--
 .../fnexecution/state/GrpcStateService.java|   6 +-
 .../io/grpc/internal/SharedResourceHolder.java |   4 +-
 .../beam/BeamDataStreamPythonFunctionRunner.java   |   4 +-
 .../python/beam/BeamPythonFunctionRunner.java  |   8 +--
 .../python/beam/state/BeamBagStateHandler.java |   2 +-
 .../python/beam/state/BeamMapStateHandler.java |   2 +-
 .../python/beam/state/BeamStateRequestHandler.java |   4 +-
 .../flink/streaming/api/utils/ProtoUtils.java  |   2 +-
 .../python/beam/BeamTablePythonFunctionRunner.java |   2 +-
 flink-python/src/main/resources/META-INF/NOTICE|  75 +++--
 .../PassThroughPythonAggregateFunctionRunner.java  |   2 +-
 .../PassThroughPythonScalarFunctionRunner.java |   2 +-
 .../PassThroughPythonTableFunctionRunner.java  |   2 +-
 ...ThroughStreamAggregatePythonFunctionRunner.java |   2 +-
 ...amGroupWindowAggregatePythonFunctionRunner.java |   2 +-
 ...ghStreamTableAggregatePythonFunctionRunner.java |   2 +-
 pom.xml|   2 +-
 tools/releasing/NOTICE-binary_PREAMBLE.txt |   2 +-
 27 files changed, 102 insertions(+), 83 deletions(-)

diff --git a/NOTICE b/NOTICE
index 759a9f65adb..98237e26c68 100644
--- a/NOTICE
+++ b/NOTICE
@@ -17,7 +17,7 @@ See bundled license files for details.
 This project bundles the following dependencies under the BSD license.
 See bundled license files for details.
 
-- cloudpickle:1.2.2
+- cloudpickle:2.0.0
 - net.sf.py4j:py4j:0.10.9.3
 
 This project bundles the following dependencies under SIL OFL 1.1 license 
(https://opensource.org/licenses/OFL-1.1).
diff --git a/flink-python/dev/dev-requirements.txt 
b/flink-python/dev/dev-requirements.txt
index b061a67a20d..e5bad6014cc 100755
--- a/flink-python/dev/dev-requirements.txt
+++ b/flink-python/dev/dev-requirements.txt
@@ -14,17 +14,18 @@
 # limitations under the License.
 setuptools>=18.0
 wheel
-apache-beam==2.27.0
+apache-beam==2.38.0
 cython==0.29.24
 py4j==0.10.9.3
 python-dateutil==2.8.0
-cloudpickle==1.2.2
+cloudpickle==2.0.0
 avro-python3>=1.8.1,!=1.9.2,<1.10.0
-pandas>=1.0,<1.2.0
-pyarrow>=0.15.1,<3.0.0
+pandas>=1.3.0
+pyarrow>=5.0.0
 pytz>=2018.3
-numpy>=1.14.3,<1.20
+numpy>=1.21.4
 fastavro>=0.21.4,<0.24
 grpcio>=1.29.0,<2
 grpcio-tools>=1.3.5,<=1.14.2
-pemja==0.1.4; python_version >= '3.7'
+pemja==0.1.5; python_version >= '3.7'
+httplib2>=0.8,<0.19.0
diff --git a/flink-python/dev/lint-python.sh b/flink-python/dev/lint-python.sh
index 09d986fb7f7..630a0505cf4 100755
--- a/flink-python/dev/lint-python.sh
+++ b/flink-python/dev/lint-python.sh
@@ -209,6 +209,12 @@ function install_miniconda() {
 if [ ! -d "$CURRENT_DIR/.conda" ]; then
 print_function "STEP" "installing conda..."
 $CONDA_INSTALL_SH -b -p $CURRENT_DIR/.conda 2>&1 >/dev/null
+
+# orjson depend on pip >= 20.3
+print_function "STEP" "upgrade pip..."
+$CURRENT_DIR/.conda/bin/python -m pip install --upgrade pip 2>&1 
>/dev/null
+print_function "STEP" "upgrade pip... [SUCCESS]"
+
 if [ $? -ne 0 ]; then
 echo "install miniconda failed"
 exit $CONDA_INSTALL_STATUS
diff --git a/flink-python/lib/cloudpickle-1.2.2-src.zip 
b/flink-python/lib/cloudpickle-1.2.2-src.zip
deleted file mode 100644
index 4d73a881f5e..000
Binary files a/flink-python/lib/cloudpickle-1.2.2-src.zip and /dev/null differ
diff --git a/flink-python/lib/cloudpickle-2.0.0-src.zip 
b/flink-python/lib/cloudpickle-2.0.0-src.zip
new file mode 100644
index 000..ed416829018
Binary files /dev/null and b/flink-python/lib/cloudpickle-2.0.0-src.zip differ
diff --git a/flink-python/pom.xml b/flink-python/pom.xml
index 2783ecd70da..3e62acf94c6 100644
--- a/flink-python/pom.xml
+++ b/flink-python/pom.xml
@@ -34,7 +34,7 @@ under the License.
jar
 

-   0.16.0
+   

[flink] branch release-1.15 updated (73590f432d8 -> 9a4ca4c4bfd)

2022-05-23 Thread leonard
This is an automated email from the ASF dual-hosted git repository.

leonard pushed a change to branch release-1.15
in repository https://gitbox.apache.org/repos/asf/flink.git


from 73590f432d8 [hotfix][python][connector/pulsar] Improve 
PulsarDeserializationSchema.flink_type_info set execution_config default to None
 add 9a4ca4c4bfd [FLINK-24735][sql-client] Catch Throwable rather than 
Exception in LocalExecutor to avoid client crash

No new revisions were added by this update.

Summary of changes:
 .../flink/table/client/gateway/local/LocalExecutor.java  | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)



[flink] branch master updated: [hotfix][tests] Fix command not found error in azure watchdog script

2022-05-23 Thread martijnvisser
This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 9920140209a [hotfix][tests] Fix command not found error in azure 
watchdog script
9920140209a is described below

commit 9920140209a11bf0dae1c400cfaba395cc499a13
Author: Paul Lam 
AuthorDate: Sat Sep 18 16:39:27 2021 +0800

[hotfix][tests] Fix command not found error in azure watchdog script
---
 tools/azure-pipelines/uploading_watchdog.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/azure-pipelines/uploading_watchdog.sh 
b/tools/azure-pipelines/uploading_watchdog.sh
index c49f093200a..152f11581d0 100755
--- a/tools/azure-pipelines/uploading_watchdog.sh
+++ b/tools/azure-pipelines/uploading_watchdog.sh
@@ -57,7 +57,7 @@ function timeout_watchdog() {
   if [[ $secondsToKill -lt 0 ]]; then
 secondsToKill=0
   fi
-  sleep $(secondsToKill)
+  sleep ${secondsToKill}
   print_stacktraces | tee "$DEBUG_FILES_OUTPUT_DIR/jps-traces.1"
 
   echo "="



[flink-playgrounds] branch release-1.14 created (now e891d49)

2022-05-23 Thread danderson
This is an automated email from the ASF dual-hosted git repository.

danderson pushed a change to branch release-1.14
in repository https://gitbox.apache.org/repos/asf/flink-playgrounds.git


  at e891d49  [FLINK-27509] update table-walkthrough playground for Flink 
1.14

No new revisions were added by this update.



[flink-playgrounds] branch master updated (de4d2d7 -> e891d49)

2022-05-23 Thread danderson
This is an automated email from the ASF dual-hosted git repository.

danderson pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink-playgrounds.git


from de4d2d7  [FLINK-27507] update operations-walkthrough playground for 
Flink 1.14
 add e891d49  [FLINK-27509] update table-walkthrough playground for Flink 
1.14

No new revisions were added by this update.

Summary of changes:
 README.md  |  2 +-
 docker/data-generator/Dockerfile   |  2 +-
 table-walkthrough/Dockerfile   | 10 +-
 table-walkthrough/docker-compose.yml   |  6 +++---
 table-walkthrough/pom.xml  |  8 +---
 .../org/apache/flink/playgrounds/spendreport/SpendReport.java  |  1 +
 6 files changed, 12 insertions(+), 17 deletions(-)



[flink] branch master updated (5ee8fb44029 -> 0ad2fa90df5)

2022-05-23 Thread chesnay
This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


from 5ee8fb44029 [hotfix][docs][connector/pulsar] Fix doc typo of 
setDeliveryGuarantee
 add 0ad2fa90df5 [FLINK-27732][tests] Migrate flink-examples-table to JUnit5

No new revisions were added by this update.

Summary of changes:
 .../java/basics/GettingStartedExampleITCase.java   | 15 +++---
 .../java/basics/StreamSQLExampleITCase.java| 16 +++
 .../java/basics/UpdatingTopCityExampleITCase.java  | 22 -
 .../java/basics/WordCountSQLExampleITCase.java | 16 +++
 .../functions/AdvancedFunctionsExampleITCase.java  | 56 ++
 .../scala/basics/GettingStartedExampleITCase.java  | 15 +++---
 .../scala/basics/StreamSQLExampleITCase.java   | 16 +++
 .../scala/basics/WordCountSQLExampleITCase.java| 16 +++
 .../examples/utils/ExampleOutputTestBase.java  | 22 -
 .../org.junit.jupiter.api.extension.Extension  |  0
 10 files changed, 81 insertions(+), 113 deletions(-)
 copy {flink-connectors/flink-connector-aws-base => 
flink-examples/flink-examples-table}/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
 (100%)



svn commit: r54682 - /dev/flink/flink-kubernetes-operator-1.0.0-rc1/

2022-05-23 Thread wangyang0918
Author: wangyang0918
Date: Mon May 23 09:48:48 2022
New Revision: 54682

Log:
Apache Flink Kubernetes Operator, version 1.0.0, release candidate 1 (Fix image 
tag in helm chart values).

Modified:

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.sha512
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml.asc
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml.sha512

Modified: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz
==
Binary files - no diff available.

Modified: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
 (original)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
 Mon May 23 09:48:48 2022
@@ -1,16 +1,16 @@
 -BEGIN PGP SIGNATURE-
 
-iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLIlwACgkQowEAbzWR
-7izTtw/9HqCUxRAxZkDx7fCVLd+jQEx5IaPBSLCu8hhvktgMdVed7o207Fe5mqiT
-Wpq0LKUa7rgTvHUhDJuaR5wL7wt6YkRssAlFliM5J2tiPx2sqBmegu5j70wIZQ7Q
-wIeX2PsqfEheUOWXCp8tL2pkMsJvr9bf2qdbjbUKrB5reaCxSX6ULdkZCK9tt1AW
-zKrCpDnPr7saOsiWGYcw0fVgGtcF6Eqvt1rk8ex3l9fk/PVmNS+gksg06Ikyi+sI
-m3rAcgaHPaWU44kop57Z0aIuRKRx4O1oDk8imNlB9c6Iu2bP91XkeM0U31O8xxG3
-+gCGw2IjyRVVtu+1X+N+kyO8nrwSwDR7rmFhOGPD/vGTUBnHL40f4p6RIZEw6mRY
-wTBq/tYlE55fbamsi9dx1AY5QxcV1pOK2ZTTciXhqaSP50zVnpTWs1EleO9N6IYO
-MwMFsL9TV01xqmoiyLhmgxfs2lou5ZEKB/y7YtuVAgnaonz1+RiRJ5bIb1hAYULA
-stBnkaDV7ynbDHTNH9l/9kirYIIamNwRgoMiRGbsVYt0iFqefTNG1G8Rq3ibSWTk
-oML+/22IwASMDWu4XGsBo4bL0QwBmmZ7owrRwn/ItZhwK2mZ0yYQocIy5nz389hd
-8Nd12ambNvx5Q0OOMA1n1bytNF8aYCHl+VDWM1CtrRAn7fe0HFs=
-=Fr1T
+iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLVg0ACgkQowEAbzWR
+7iz58g/7B6y5pzIH30FWoRhKIdacZ18iNAFZ57neI3DAXPoQDYCQ2g+VxMZu/jmm
+D+iYlAGbAtsvAHmDjup9DzeXrEPwMgy+ZCtwksU5g4YSjH3EpvdxGeOWQwuyxEv4
+Fy/vryQYzZMlKifP0NKP3a4VT3iAo+kOF4EoRqxKO2qfL/kKHD5Q5NE01bi5RrIE
+8DYyfnwfIcfTlyfSAfEdGdHLFKoXyA/HP5Uuir88zzExbWrtvy5E3Hr9a2IRWys5
+1JLGqTRWVaWSwGVLCKXgSe0chp4OgjzpsRrY5RAb93ZMcOyQaMMbfUa0WtS0OGoz
+/xzqG3GAz3B2Y71XNbyFB7IBLo409y82MPhlkQO/atfD0rFxx6vkB9Hk3vbLN8r/
+BWNTX76X9jcDB7Z0fWmxDAd7k/l4I5pq90yQs9rXsVlWozFKCdik1bbF/KP3sPhJ
+l5Vm+4Re7BL55qRS/Osing7oRWs/+0DUznbCuOeCfQy+3KvNdi24KCyx4rrJctTK
+U+QDoU02xni2hd0U3UHz7PeS0hvOE6lCP1ED7TKVmlWg6+tOr374hi2xzXjzDU+G
+f2YbXpTpWaFBpnUCz7fcuHg8Bmdi9sApL/bFnG7slI9zcJKlzfvfn63PxOvxWUrL
+CwbYkX9djT5/pnmuEKIlW8UYiZ/eMYZF49qTUcuCz+NGKGWeUMM=
+=etIB
 -END PGP SIGNATURE-

Modified: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
 (original)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
 Mon May 23 09:48:48 2022
@@ -1 +1 @@
-4883378930b780ed0f37b347215a4ec71ba46f15a3dd050f06914db2d264aa4d216ca84522c7aba4e195b38d8f4c11c8df01075df12c3c6c5fe7c27dd02ee29e
  flink-kubernetes-operator-1.0.0-helm.tgz
+5de754d63cec5945d0b2a428135a893dd866a0416f6b947ea4fee78c69b48e7501e5571c452af5e29f58a6989d4dad65c02aefb9bd5cac050f638eec3791019a
  flink-kubernetes-operator-1.0.0-helm.tgz

Modified: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz
==
Binary files - no diff available.

Modified: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
 (original)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
 Mon May 23 09:48:48 2022
@@ -1,16 +1,16 @@
 -BEGIN PGP SIGNATURE-
 
-iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLIlwACgkQowEAbzWR
-7iwHYw/+JPk95BmXcDmmoHICr+kIEfmAI9QNGNkuRuPqk/n0t5Up2CJVgA2jaDUq
-bP70nAaowattC9DHgXmPr3RUSB/OlAs3EkZYPaIevP14m/t7621kY+8zcRga9FWV
-ZyOnRLgQZS8kFhpDAWptRwVRZPCYqXT93z3ggOtmmwFOe9jxGpiLR2LTs4VOF37O

[flink] branch master updated (6200128038e -> 5ee8fb44029)

2022-05-23 Thread martijnvisser
This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


from 6200128038e [hotfix][docs] Update java doc to use DataStream API 
instead of deprecated DataSet API
 add 5ee8fb44029 [hotfix][docs][connector/pulsar] Fix doc typo of 
setDeliveryGuarantee

No new revisions were added by this update.

Summary of changes:
 docs/content.zh/docs/connectors/datastream/pulsar.md | 2 +-
 docs/content/docs/connectors/datastream/pulsar.md| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)



[flink-playgrounds] branch dependabot/maven/table-walkthrough/junit-junit-4.13.1 created (now 76577c8)

2022-05-23 Thread github-bot
This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a change to branch 
dependabot/maven/table-walkthrough/junit-junit-4.13.1
in repository https://gitbox.apache.org/repos/asf/flink-playgrounds.git


  at 76577c8  Bump junit from 4.12 to 4.13.1 in /table-walkthrough

No new revisions were added by this update.



[flink-playgrounds] branch master updated: [FLINK-27507] update operations-walkthrough playground for Flink 1.14

2022-05-23 Thread danderson
This is an automated email from the ASF dual-hosted git repository.

danderson pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink-playgrounds.git


The following commit(s) were added to refs/heads/master by this push:
 new de4d2d7  [FLINK-27507] update operations-walkthrough playground for 
Flink 1.14
de4d2d7 is described below

commit de4d2d7b53be22d2ab1c30d6dd10f3c3f3fcba01
Author: Shubham Bansal 
AuthorDate: Mon May 23 02:04:41 2022 -0700

[FLINK-27507] update operations-walkthrough playground for Flink 1.14

* [FLINK-27507] update operations-walkthrough playground for Flink 1.14

* [FLINK-27507] removed docker socket mount
---
 README.md  |  2 +-
 docker/ops-playground-image/Dockerfile |  4 +--
 .../java/flink-playground-clickcountjob/pom.xml|  4 +--
 .../ops/clickcount/ClickEventCount.java| 23 +++--
 .../ClickEventStatisticsSerializationSchema.java   | 29 ++
 operations-playground/README.md|  2 +-
 operations-playground/docker-compose.yaml  | 10 
 table-walkthrough/docker-compose.yml   |  2 --
 8 files changed, 34 insertions(+), 42 deletions(-)

diff --git a/README.md b/README.md
index f825a08..84937d4 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ Currently, the following playgrounds are available:
 
 * The **Flink Operations Playground** (in the `operations-playground` folder) 
lets you explore and play with Flink's features to manage and operate stream 
processing jobs. You can witness how Flink recovers a job from a failure, 
upgrade and rescale a job, and query job metrics. The playground consists of a 
Flink cluster, a Kafka cluster and an example 
 Flink job. The playground is presented in detail in
-["Flink Operations 
Playground"](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/try-flink/flink-operations-playground),
 which is part of the _Try Flink_ section of the Flink documentation.
+["Flink Operations 
Playground"](https://ci.apache.org/projects/flink/flink-docs-release-1.14/docs/try-flink/flink-operations-playground),
 which is part of the _Try Flink_ section of the Flink documentation.
 
 * The **Table Walkthrough** (in the `table-walkthrough` folder) shows to use 
the Table API to build an analytics pipeline that reads streaming data from 
Kafka and writes results to MySQL, along with a real-time dashboard in Grafana. 
The walkthrough is presented in detail in ["Real Time Reporting with the Table 
API"](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/try-flink/table_api),
 which is part of the _Try Flink_ section of the Flink documentation.
 
diff --git a/docker/ops-playground-image/Dockerfile 
b/docker/ops-playground-image/Dockerfile
index e7c89aa..3673167 100644
--- a/docker/ops-playground-image/Dockerfile
+++ b/docker/ops-playground-image/Dockerfile
@@ -20,7 +20,7 @@
 # Build Click Count Job
 ###
 
-FROM maven:3.6-jdk-8-slim AS builder
+FROM maven:3.8-jdk-8-slim AS builder
 
 # Get Click Count job and compile it
 COPY ./java/flink-playground-clickcountjob /opt/flink-playground-clickcountjob
@@ -32,7 +32,7 @@ RUN mvn clean install
 # Build Operations Playground Image
 ###
 
-FROM apache/flink:1.13.1-scala_2.12-java8
+FROM apache/flink:1.14.4-scala_2.12-java8
 
 WORKDIR /opt/flink/bin
 
diff --git 
a/docker/ops-playground-image/java/flink-playground-clickcountjob/pom.xml 
b/docker/ops-playground-image/java/flink-playground-clickcountjob/pom.xml
index a2e99ee..b62376d 100644
--- a/docker/ops-playground-image/java/flink-playground-clickcountjob/pom.xml
+++ b/docker/ops-playground-image/java/flink-playground-clickcountjob/pom.xml
@@ -22,7 +22,7 @@ under the License.
 
org.apache.flink
flink-playground-clickcountjob
-   1-FLINK-1.13_2.12
+   1-FLINK-1.14_2.12
 
flink-playground-clickcountjob
jar
@@ -44,7 +44,7 @@ under the License.
 
 

UTF-8
-   1.13.1
+   1.14.4
1.8
2.12
${java.version}
diff --git 
a/docker/ops-playground-image/java/flink-playground-clickcountjob/src/main/java/org/apache/flink/playgrounds/ops/clickcount/ClickEventCount.java
 
b/docker/ops-playground-image/java/flink-playground-clickcountjob/src/main/java/org/apache/flink/playgrounds/ops/clickcount/ClickEventCount.java
index 489fd19..359ef2e 100644
--- 
a/docker/ops-playground-image/java/flink-playground-clickcountjob/src/main/java/org/apache/flink/playgrounds/ops/clickcount/ClickEventCount.java
+++ 
b/docker/ops-playground-image/java/flink-playground-clickcountjob/src/main/java/org/apache/flink/playgrounds/ops/clickcount/ClickEventCount.java
@@ -19,6 +19,9 @@ package 

[flink] branch master updated: [hotfix][docs] Update java doc to use DataStream API instead of deprecated DataSet API

2022-05-23 Thread leonard
This is an automated email from the ASF dual-hosted git repository.

leonard pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 6200128038e [hotfix][docs] Update java doc to use DataStream API 
instead of deprecated DataSet API
6200128038e is described below

commit 6200128038e2184889a5cd0750bd534c16be2fba
Author: Jing Ge 
AuthorDate: Mon May 23 10:12:09 2022 +0200

[hotfix][docs] Update java doc to use DataStream API instead of deprecated 
DataSet API

This closes #19784.
---
 .../api/common/functions/CoGroupFunction.java  | 32 --
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git 
a/flink-core/src/main/java/org/apache/flink/api/common/functions/CoGroupFunction.java
 
b/flink-core/src/main/java/org/apache/flink/api/common/functions/CoGroupFunction.java
index bd863888264..94989405197 100644
--- 
a/flink-core/src/main/java/org/apache/flink/api/common/functions/CoGroupFunction.java
+++ 
b/flink-core/src/main/java/org/apache/flink/api/common/functions/CoGroupFunction.java
@@ -24,28 +24,32 @@ import org.apache.flink.util.Collector;
 import java.io.Serializable;
 
 /**
- * The interface for CoGroup functions. CoGroup functions combine two data 
sets by first grouping
- * each data set after a key and then "joining" the groups by calling this 
function with the two
- * sets for each key. If a key is present in only one of the two inputs, it 
may be that one of the
- * groups is empty.
+ * The interface for CoGroup functions. CoGroup functions combine two {@code 
DataStream}s by first
+ * grouping each data stream after a key and then "joining" the groups by 
calling this function with
+ * the two streams for each key. If a key is present in only one of the two 
inputs, it may be that
+ * one of the groups is empty.
  *
- * The basic syntax for using CoGroup on two data sets is as follows:
+ * The basic syntax for using CoGroup on two data streams is as follows:
  *
  * {@code
- * DataSet set1 = ...;
- * DataSet set2 = ...;
+ * DataStream stream1 = ...;
+ * DataStream stream2 = ...;
  *
- * 
set1.coGroup(set2).where().equalTo().with(new 
MyCoGroupFunction());
+ * stream1.coGroup(stream2)
+ *.where()
+ *.equalTo()
+ *.window()
+ *.apply(new MyCoGroupFunction());
  * }
  *
- * {@code set1} is here considered the first input, {@code set2} the second 
input.
+ * {@code stream1} is here considered the first input, {@code stream2} the 
second input.
  *
- * Some keys may only be contained in one of the two original data sets. In 
that case, the
- * CoGroup function is invoked with in empty input for the side of the data 
set that did not contain
- * elements with that specific key.
+ * Some keys may only be contained in one of the two original data streams. 
In that case, the
+ * CoGroup function is invoked with an empty input for the side of the data 
stream that did not
+ * contain elements with that specific key.
  *
- * @param  The data type of the first input data set.
- * @param  The data type of the second input data set.
+ * @param  The data type of the first input data stream.
+ * @param  The data type of the second input data stream.
  * @param  The data type of the returned elements.
  */
 @Public



[flink-web] branch asf-site updated: Rebuild website

2022-05-23 Thread martijnvisser
This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new ffa580a1b Rebuild website
ffa580a1b is described below

commit ffa580a1b5e893e3360ec7a06c445a210b77e503
Author: MartijnVisser 
AuthorDate: Mon May 23 10:06:52 2022 +0200

Rebuild website
---
 content/2022/05/23/latency-part2.html  | 356 
 content/blog/feed.xml  | 448 -
 content/blog/index.html|  36 +-
 content/blog/page10/index.html |  36 +-
 content/blog/page11/index.html |  36 +-
 content/blog/page12/index.html |  38 +-
 content/blog/page13/index.html |  38 +-
 content/blog/page14/index.html |  38 +-
 content/blog/page15/index.html |  40 +-
 content/blog/page16/index.html |  40 +-
 content/blog/page17/index.html |  40 +-
 content/blog/page18/index.html |  40 +-
 content/blog/page19/index.html |  25 ++
 content/blog/page2/index.html  |  36 +-
 content/blog/page3/index.html  |  38 +-
 content/blog/page4/index.html  |  40 +-
 content/blog/page5/index.html  |  38 +-
 content/blog/page6/index.html  |  38 +-
 content/blog/page7/index.html  |  41 +-
 content/blog/page8/index.html  |  39 +-
 content/blog/page9/index.html  |  36 +-
 .../img/blog/2022-05-23-latency-part2/async-io.png | Bin 0 -> 110500 bytes
 .../enriching-with-async-io.png| Bin 0 -> 246599 bytes
 .../blog/2022-05-23-latency-part2/spread-work.png  | Bin 0 -> 153305 bytes
 content/index.html |   6 +-
 content/zh/index.html  |   6 +-
 26 files changed, 908 insertions(+), 621 deletions(-)

diff --git a/content/2022/05/23/latency-part2.html 
b/content/2022/05/23/latency-part2.html
new file mode 100644
index 0..b417caeb1
--- /dev/null
+++ b/content/2022/05/23/latency-part2.html
@@ -0,0 +1,356 @@
+
+
+  
+
+
+
+
+Apache Flink: Getting into Low-Latency Gears with Apache Flink - 
Part Two
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  var _paq = window._paq = window._paq || [];
+  /* tracker methods like "setCustomDimension" should be called before 
"trackPageView" */
+  /* We explicitly disable cookie tracking to avoid privacy issues */
+  _paq.push(['disableCookies']);
+  /* Measure a visit to flink.apache.org and nightlies.apache.org/flink as 
the same visit */
+  _paq.push(["setDomains", 
["*.flink.apache.org","*.nightlies.apache.org/flink"]]);
+  _paq.push(['trackPageView']);
+  _paq.push(['enableLinkTracking']);
+  (function() {
+var u="//matomo.privacy.apache.org/";
+_paq.push(['setTrackerUrl', u+'matomo.php']);
+_paq.push(['setSiteId', '1']);
+var d=document, g=d.createElement('script'), 
s=d.getElementsByTagName('script')[0];
+g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
+  })();
+
+
+  
+
+
+
+
+
+
+
+  
+ 
+
+
+
+
+
+
+  
+
+
+
+  
+  
+
+  
+
+  
+
+
+
+
+  
+
+
+
+
+
+
+
+What is Apache 
Flink?
+
+
+
+
+
+https://nightlies.apache.org/flink/flink-statefun-docs-stable/;>What is 
Stateful Functions?
+
+
+
+https://nightlies.apache.org/flink/flink-ml-docs-stable/;>What is Flink 
ML?
+
+
+Use Cases
+
+
+Powered By
+
+
+
+
+
+
+Downloads
+
+
+
+  Getting Started
+  
+https://nightlies.apache.org/flink/flink-docs-release-1.15//docs/try-flink/local_installation/;
 target="_blank">With Flink 
+https://nightlies.apache.org/flink/flink-statefun-docs-release-3.2/getting-started/project-setup.html;
 target="_blank">With Flink Stateful Functions 
+https://nightlies.apache.org/flink/flink-ml-docs-release-2.0/try-flink-ml/quick-start.html;
 target="_blank">With Flink ML 
+https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-release-0.1/try-flink-kubernetes-operator/quick-start.html;
 target="_blank">With Flink Kubernetes Operator 
+ 

[flink-web] branch asf-site updated: add low latency techniques blog post part2

2022-05-23 Thread martijnvisser
This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new e11781f4d add low latency techniques blog post part2
e11781f4d is described below

commit e11781f4dd965d0561d1199ae4dd13e7f596afd4
Author: Jun Qin <11677043+qinjunje...@users.noreply.github.com>
AuthorDate: Tue May 17 20:13:28 2022 +0200

add low latency techniques blog post part2
---
 _posts/2022-05-23-latency-part2.md |  97 +
 img/blog/2022-05-23-latency-part2/async-io.png | Bin 0 -> 110500 bytes
 .../enriching-with-async-io.png| Bin 0 -> 246599 bytes
 img/blog/2022-05-23-latency-part2/spread-work.png  | Bin 0 -> 153305 bytes
 4 files changed, 97 insertions(+)

diff --git a/_posts/2022-05-23-latency-part2.md 
b/_posts/2022-05-23-latency-part2.md
new file mode 100644
index 0..2a96f1985
--- /dev/null
+++ b/_posts/2022-05-23-latency-part2.md
@@ -0,0 +1,97 @@
+---
+layout: post
+title: "Getting into Low-Latency Gears with Apache Flink - Part Two"
+date: 2022-05-23 00:00:00
+authors:
+- Jun Qin:
+  name: "Jun Qin"
+- Nico Kruber:
+  name: "Nico Kruber"
+excerpt: This multi-part series of blog posts presents a collection of 
low-latency techniques in Flink. Following part one, part two continues 
with a few more techniques that optimize latency directly.
+---
+
+This series of blog posts presents a collection of low-latency techniques in 
Flink. In [part one](https://flink.apache.org/2022/05/18/latency-part1.html), 
we discussed the types of latency in Flink and the way we measure end-to-end 
latency and presented a few techniques that optimize latency directly. In this 
post, we will continue with a few more direct latency optimization techniques. 
Just like in part one, for each optimization technique, we will clarify what it 
is, when to use it, and [...]
+
+
+# Direct latency optimization
+
+## Spread work across time
+
+When you use timers or do windowing in a job, timer or window firing may 
create load spikes due to heavy computation or state access. If the allocated 
resources cannot cope with these load spikes, timer or window firing will take 
a long time to finish. This often results in high latency.
+
+To avoid this situation, you should change your code to spread out the 
workload as much as possible such that you do not accumulate too much work to 
be done at a single point in time. In the case of windowing, you should 
consider using incremental window aggregation with `AggregateFunction` or 
`ReduceFunction`. In the case of timers in a `ProcessFunction`, the operations 
executed in the `onTimer()` method should be optimized such that the time spent 
there is reduced to a minimum. If you  [...]
+
+**You can apply this optimization** if you are using timer-based processing 
(e.g., timers, windowing) and an efficient aggregation can be applied whenever 
an event arrives instead of waiting for timers to fire.
+
+**Keep in mind** that when you spread work across time, you should consider 
not only computation but also state access, especially when using RocksDB. 
Spreading one type of work while accumulating the other may result in higher 
latencies.
+
+[WindowingJob](https://github.com/ververica/lab-flink-latency/blob/main/src/main/java/com/ververica/lablatency/job/WindowingJob.java)
 already does incremental window aggregation with `AggregateFunction`. To show 
the latency improvement of this technique, we compared 
[WindowingJob](https://github.com/ververica/lab-flink-latency/blob/main/src/main/java/com/ververica/lablatency/job/WindowingJob.java)
 with a variant that does not do incremental aggregation, 
[WindowingJobNoAggregation](https: [...]
+
+
+
+
+
+
+
+## Access external systems efficiently
+
+### Using async I/O
+
+When interacting with external systems (e.g., RDBMS, object stores, web 
services) in a Flink job for data enrichment, the latency in getting responses 
from external systems often dominates the overall latency of the job. With 
Flinkā€™s [Async I/O 
API](https://ci.apache.org/projects/flink/flink-docs-stable/dev/stream/operators/asyncio.html)
 (e.g., `AsyncDataStream.unorderedWait()` or `AsyncDataStream.orderedWait()`), 
a single parallel function instance can handle many requests concurrently  [...]
+
+
+
+
+
+**You can apply this optimization** if the client of your external system 
supports asynchronous requests. If it does not, you can use a thread pool of 
multiple clients to handle synchronous requests in parallel. You can also use a 
cache to speed up lookups if the data in the external system is not changing 
frequently. A cache, however, comes at the cost of working with outdated data.
+
+In this experiment, we simulated an external system that returns responses 
within 1 to 6 ms randomly, and we keep the external system response in a 

[flink] branch master updated: [FLINK-24735][sql-client] Catch Throwable rather than Exception in LocalExecutor to avoid client crash

2022-05-23 Thread leonard
This is an automated email from the ASF dual-hosted git repository.

leonard pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new cdbc3f61583 [FLINK-24735][sql-client] Catch Throwable rather than 
Exception in LocalExecutor to avoid client crash
cdbc3f61583 is described below

commit cdbc3f61583b339f508c54717a34bad16a00a681
Author: Shengkai <33114724+fsk...@users.noreply.github.com>
AuthorDate: Mon May 23 15:57:31 2022 +0800

[FLINK-24735][sql-client] Catch Throwable rather than Exception in 
LocalExecutor to avoid client crash

This closes #19773.
---
 .../flink/table/client/gateway/local/LocalExecutor.java  | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git 
a/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
 
b/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
index 3639ef8729e..1540dec8f65 100644
--- 
a/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
+++ 
b/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/LocalExecutor.java
@@ -170,8 +170,8 @@ public class LocalExecutor implements Executor {
 List operations;
 try {
 operations = context.wrapClassLoader(() -> 
parser.parse(statement));
-} catch (Exception e) {
-throw new SqlExecutionException("Failed to parse statement: " + 
statement, e);
+} catch (Throwable t) {
+throw new SqlExecutionException("Failed to parse statement: " + 
statement, t);
 }
 if (operations.isEmpty()) {
 throw new SqlExecutionException("Failed to parse statement: " + 
statement);
@@ -207,8 +207,8 @@ public class LocalExecutor implements Executor {
 (TableEnvironmentInternal) context.getTableEnvironment();
 try {
 return context.wrapClassLoader(() -> 
tEnv.executeInternal(operation));
-} catch (Exception e) {
-throw new SqlExecutionException(MESSAGE_SQL_EXECUTION_ERROR, e);
+} catch (Throwable t) {
+throw new SqlExecutionException(MESSAGE_SQL_EXECUTION_ERROR, t);
 }
 }
 
@@ -220,8 +220,8 @@ public class LocalExecutor implements Executor {
 (TableEnvironmentInternal) context.getTableEnvironment();
 try {
 return context.wrapClassLoader(() -> 
tEnv.executeInternal(operations));
-} catch (Exception e) {
-throw new SqlExecutionException(MESSAGE_SQL_EXECUTION_ERROR, e);
+} catch (Throwable t) {
+throw new SqlExecutionException(MESSAGE_SQL_EXECUTION_ERROR, t);
 }
 }
 
@@ -299,8 +299,8 @@ public class LocalExecutor implements Executor {
 try {
 // this operator will also stop flink job
 result.close();
-} catch (Exception e) {
-throw new SqlExecutionException("Could not cancel the query 
execution", e);
+} catch (Throwable t) {
+throw new SqlExecutionException("Could not cancel the query 
execution", t);
 }
 resultStore.removeResult(resultId);
 }



[flink-table-store] branch master updated: [FLINK-27705] Prevent num-sorted-run.compaction-trigger from interfering num-levels

2022-05-23 Thread lzljs3620320
This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink-table-store.git


The following commit(s) were added to refs/heads/master by this push:
 new a163dc8  [FLINK-27705] Prevent num-sorted-run.compaction-trigger from 
interfering num-levels
a163dc8 is described below

commit a163dc83c0c89fe62a7aecce0ead985710eec425
Author: Jane Chan <55568005+ladyfor...@users.noreply.github.com>
AuthorDate: Mon May 23 15:54:46 2022 +0800

[FLINK-27705] Prevent num-sorted-run.compaction-trigger from interfering 
num-levels

This closes #132
---
 .../store/connector/ForceCompactionITCase.java | 44 --
 .../flink/table/store/file/mergetree/Levels.java   | 10 -
 2 files changed, 49 insertions(+), 5 deletions(-)

diff --git 
a/flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/ForceCompactionITCase.java
 
b/flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/ForceCompactionITCase.java
index 0940e36..d0fb504 100644
--- 
a/flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/ForceCompactionITCase.java
+++ 
b/flink-table-store-connector/src/test/java/org/apache/flink/table/store/connector/ForceCompactionITCase.java
@@ -20,7 +20,7 @@ package org.apache.flink.table.store.connector;
 
 import org.junit.Test;
 
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.List;
 
 import static org.assertj.core.api.Assertions.assertThat;
@@ -30,12 +30,17 @@ public class ForceCompactionITCase extends 
FileStoreTableITCase {
 
 @Override
 protected List ddl() {
-return Collections.singletonList(
+return Arrays.asList(
 "CREATE TABLE IF NOT EXISTS T (\n"
 + "  f0 INT\n, "
 + "  f1 STRING\n, "
 + "  f2 STRING\n"
-+ ") PARTITIONED BY (f1)");
++ ") PARTITIONED BY (f1)",
+"CREATE TABLE IF NOT EXISTS T1 (\n"
++ "  f0 INT\n, "
++ "  f1 STRING\n, "
++ "  f2 STRING\n"
++ ")");
 }
 
 @Test
@@ -74,4 +79,37 @@ public class ForceCompactionITCase extends 
FileStoreTableITCase {
 
 assertThat(batchSql("SELECT * FROM T")).hasSize(21);
 }
+
+@Test
+public void testNoDefaultNumOfLevels() throws Exception {
+bEnv.executeSql("ALTER TABLE T1 SET ('commit.force-compact' = 
'true')");
+bEnv.executeSql(
+"INSERT INTO T1 VALUES(1, 'Winter', 'Winter is 
Coming'),"
++ "(2, 'Winter', 'The First Snowflake'), "
++ "(2, 'Spring', 'The First Rose in Spring'), "
++ "(7, 'Summer', 'Summertime Sadness')")
+.await();
+bEnv.executeSql("INSERT INTO T1 VALUES(12, 'Winter', 'Last 
Christmas')").await();
+bEnv.executeSql("INSERT INTO T1 VALUES(11, 'Winter', 'Winter is 
Coming')").await();
+bEnv.executeSql("INSERT INTO T1 VALUES(10, 'Autumn', 
'Refrain')").await();
+bEnv.executeSql(
+"INSERT INTO T1 VALUES(6, 'Summer', 'Watermelon 
Sugar'), "
++ "(4, 'Spring', 'Spring Water')")
+.await();
+bEnv.executeSql(
+"INSERT INTO T1 VALUES(66, 'Summer', 'Summer Vibe'), "
++ "(9, 'Autumn', 'Wake Me Up When September 
Ends')")
+.await();
+bEnv.executeSql(
+"INSERT INTO T1 VALUES(666, 'Summer', 'Summer Vibe'), "
++ "(9, 'Autumn', 'Wake Me Up When September 
Ends')")
+.await();
+bEnv.executeSql("ALTER TABLE T1 SET 
('num-sorted-run.compaction-trigger' = '2')");
+bEnv.executeSql(
+"INSERT INTO T1 VALUES(666, 'Summer', 'Summer Vibe'), "
++ "(9, 'Autumn', 'Wake Me Up When September 
Ends')")
+.await();
+
+assertThat(batchSql("SELECT * FROM T1")).hasSize(15);
+}
 }
diff --git 
a/flink-table-store-core/src/main/java/org/apache/flink/table/store/file/mergetree/Levels.java
 
b/flink-table-store-core/src/main/java/org/apache/flink/table/store/file/mergetree/Levels.java
index 4169901..644a4ef 100644
--- 
a/flink-table-store-core/src/main/java/org/apache/flink/table/store/file/mergetree/Levels.java
+++ 
b/flink-table-store-core/src/main/java/org/apache/flink/table/store/file/mergetree/Levels.java
@@ -43,11 +43,17 @@ public class Levels {
 
 public Levels(Comparator keyComparator, List 
inputFiles, int numLevels) {
 this.keyComparator = keyComparator;
-checkArgument(numLevels > 1, "levels must be at least 2.");
+
+// 

[flink] branch master updated: [FLINK-27735][testinfrastructure] Update testcontainers dependency to v1.17.2

2022-05-23 Thread martijnvisser
This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new f026f396305 [FLINK-27735][testinfrastructure] Update testcontainers 
dependency to v1.17.2
f026f396305 is described below

commit f026f396305858c8be3a5aeacd1aa9dd3df02c87
Author: Sergey Nuyanzin 
AuthorDate: Sun May 22 15:19:06 2022 +0200

[FLINK-27735][testinfrastructure] Update testcontainers dependency to 
v1.17.2
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index addd4ad8649..b7500689754 100644
--- a/pom.xml
+++ b/pom.xml
@@ -138,7 +138,7 @@ under the License.
2.27.0
3.17.3
3.14.9
-   1.16.2
+   1.17.2
1.8.0
false
validate



[flink] branch master updated: [FLINK-26043][runtime][security] Add periodic kerberos relogin to KerberosDelegationTokenManager [FLINK-27605][tests] Updated Mockito version to 3.4.6 in order to use st

2022-05-23 Thread dmvk
This is an automated email from the ASF dual-hosted git repository.

dmvk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git


The following commit(s) were added to refs/heads/master by this push:
 new 220ef999d2a [FLINK-26043][runtime][security] Add periodic kerberos 
relogin to KerberosDelegationTokenManager [FLINK-27605][tests] Updated Mockito 
version to 3.4.6 in order to use static method mocking
220ef999d2a is described below

commit 220ef999d2a353fd52cc0aa1a93c26d9b696c1ce
Author: gabor.g.somogyi 
AuthorDate: Mon Feb 14 14:23:17 2022 +0100

[FLINK-26043][runtime][security] Add periodic kerberos relogin to 
KerberosDelegationTokenManager
[FLINK-27605][tests] Updated Mockito version to 3.4.6 in order to use 
static method mocking
---
 .../generated/security_auth_kerberos_section.html  |   6 +
 .../generated/security_configuration.html  |   6 +
 .../kinesis/FlinkKinesisConsumerTest.java  | 215 +++--
 .../flink/configuration/SecurityOptions.java   |   9 +
 .../runtime/entrypoint/ClusterEntrypoint.java  |  15 +-
 .../flink/runtime/minicluster/MiniCluster.java |  15 +-
 .../security/token/DelegationTokenManager.java |   2 +-
 .../token/KerberosDelegationTokenManager.java  |  87 -
 .../KerberosDelegationTokenManagerFactory.java |  58 ++
 .../token/KerberosRenewalPossibleProvider.java |  66 +++
 .../runtime/rest/FileUploadHandlerITCase.java  |  14 +-
 .../token/KerberosDelegationTokenManagerTest.java  |  69 +--
 .../token/KerberosRenewalPossibleProviderTest.java |  78 
 .../apache/flink/yarn/YarnClusterDescriptor.java   |   2 +-
 pom.xml|  14 +-
 15 files changed, 508 insertions(+), 148 deletions(-)

diff --git 
a/docs/layouts/shortcodes/generated/security_auth_kerberos_section.html 
b/docs/layouts/shortcodes/generated/security_auth_kerberos_section.html
index 580b51dae73..638ee4c9ec0 100644
--- a/docs/layouts/shortcodes/generated/security_auth_kerberos_section.html
+++ b/docs/layouts/shortcodes/generated/security_auth_kerberos_section.html
@@ -38,5 +38,11 @@
 Boolean
 Indicates whether to read from your Kerberos ticket cache.
 
+
+security.kerberos.relogin.period
+1 min
+Duration
+The time period when keytab login happens automatically in 
order to always have a valid TGT.
+
 
 
diff --git a/docs/layouts/shortcodes/generated/security_configuration.html 
b/docs/layouts/shortcodes/generated/security_configuration.html
index 2afa767a44f..87a3aab06d5 100644
--- a/docs/layouts/shortcodes/generated/security_configuration.html
+++ b/docs/layouts/shortcodes/generated/security_configuration.html
@@ -50,6 +50,12 @@
 Boolean
 Indicates whether to read from your Kerberos ticket cache.
 
+
+security.kerberos.relogin.period
+1 min
+Duration
+The time period when keytab login happens automatically in 
order to always have a valid TGT.
+
 
 security.module.factory.classes
 "org.apache.flink.runtime.security.modules.HadoopModuleFactory";"org.apache.flink.runtime.security.modules.JaasModuleFactory";"org.apache.flink.runtime.security.modules.ZookeeperModuleFactory"
diff --git 
a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
 
b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
index d48e04ed4e6..91473a8ca2c 100644
--- 
a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
+++ 
b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
@@ -68,6 +68,7 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Matchers;
+import org.mockito.MockedStatic;
 import org.mockito.Mockito;
 import org.powermock.api.mockito.PowerMockito;
 import org.powermock.core.classloader.annotations.PrepareForTest;
@@ -97,13 +98,14 @@ import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 /** Suite of FlinkKinesisConsumer tests for the methods called throughout the 
source life cycle. */
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({FlinkKinesisConsumer.class, KinesisConfigUtil.class})
+@PrepareForTest(FlinkKinesisConsumer.class)
 public class FlinkKinesisConsumerTest extends 

[flink-kubernetes-operator] annotated tag release-1.0.0-rc1 updated (2417603 -> bb4e4f6)

2022-05-23 Thread wangyang0918
This is an automated email from the ASF dual-hosted git repository.

wangyang0918 pushed a change to annotated tag release-1.0.0-rc1
in repository https://gitbox.apache.org/repos/asf/flink-kubernetes-operator.git


*** WARNING: tag release-1.0.0-rc1 was modified! ***

from 2417603  (commit)
  to bb4e4f6  (tag)
 tagging 241760304d54bf80838709116993d73d47cd6f11 (commit)
  by wangyang0918
  on Mon May 23 13:42:37 2022 +0800

- Log -
release-1.0.0-rc1
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLHs0ACgkQowEAbzWR
7iwE9A//eMwdGWRDRLuFfpq/Kd14/fdkc5L3HGaYeFMUNwITZG5v2cOLVFzrDEpv
2HsP2XBKCBGbhZWhFrxMz3+cuhgaXyi5GaWSK+fyKGp2utJz31XvYJ6PQy9qHJ6N
7Zx5HbyOzxvPGJ/J+AeQeV+I/tFGMIij4YsvggSG/cTIjmAcibIqg/nVqblmNKxj
O+Y8rGtQwQWmdRw5Gqj5arTtCTMJ0i3UtyyhDPUAgyaViOlr3i+bjcL3qbh2sVrY
B/kcS9Oeuq6dBTKMnjG0K5uzw5D6gThqrrJXdrrQ2RuHZlxTolEAFQKsFwM3PxVX
A1guGS9x+0+oKsj1R8YT83bQFdSnZf+g4QFLUr9+D195Z1RM2ZAsdePmEjqaMNTO
Yq9/rlFUmByCY8EVCY1cfequTtw5gaGl2SPikn3tt9bw2xm5oBo9mBd9DMVRGdrf
1CwqfziBML+MYib0yra1AxoKztDboSWUZtt8iCQnUPCJjlUUS4mmHQA86qtSVzan
r195r54zD242aMc8MhyRuL6hPuSZlY6/HW1PVX778F5niLVG9VENuS7ijAj0hOz0
KOy4DYxVt0LE1dXcFScMLoj/T1AiFWO9mS8YZMliJ8qE7wGeZHIZlnbC+Gf1H8S9
us1vfQwGFemBCEEVnjeKnBwBT14pDP73rllbxWYFHqyhsgap4VQ=
=/orh
-----END PGP SIGNATURE-----
---


No new revisions were added by this update.

Summary of changes:



svn commit: r54679 - /dev/flink/flink-kubernetes-operator-1.0.0-rc1/

2022-05-23 Thread wangyang0918
Author: wangyang0918
Date: Mon May 23 06:31:09 2022
New Revision: 54679

Log:
Apache Flink Kubernetes Operator, version 1.0.0, release candidate 1

Added:
dev/flink/flink-kubernetes-operator-1.0.0-rc1/

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz
   (with props)

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz
   (with props)

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc

dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.sha512
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml.asc
dev/flink/flink-kubernetes-operator-1.0.0-rc1/index.yaml.sha512

Added: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz
==
Binary file - no diff available.

Propchange: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz
--
svn:mime-type = application/octet-stream

Added: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
 (added)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.asc
 Mon May 23 06:31:09 2022
@@ -0,0 +1,16 @@
+-----BEGIN PGP SIGNATURE-----
+
+iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLIlwACgkQowEAbzWR
+7izTtw/9HqCUxRAxZkDx7fCVLd+jQEx5IaPBSLCu8hhvktgMdVed7o207Fe5mqiT
+Wpq0LKUa7rgTvHUhDJuaR5wL7wt6YkRssAlFliM5J2tiPx2sqBmegu5j70wIZQ7Q
+wIeX2PsqfEheUOWXCp8tL2pkMsJvr9bf2qdbjbUKrB5reaCxSX6ULdkZCK9tt1AW
+zKrCpDnPr7saOsiWGYcw0fVgGtcF6Eqvt1rk8ex3l9fk/PVmNS+gksg06Ikyi+sI
+m3rAcgaHPaWU44kop57Z0aIuRKRx4O1oDk8imNlB9c6Iu2bP91XkeM0U31O8xxG3
++gCGw2IjyRVVtu+1X+N+kyO8nrwSwDR7rmFhOGPD/vGTUBnHL40f4p6RIZEw6mRY
+wTBq/tYlE55fbamsi9dx1AY5QxcV1pOK2ZTTciXhqaSP50zVnpTWs1EleO9N6IYO
+MwMFsL9TV01xqmoiyLhmgxfs2lou5ZEKB/y7YtuVAgnaonz1+RiRJ5bIb1hAYULA
+stBnkaDV7ynbDHTNH9l/9kirYIIamNwRgoMiRGbsVYt0iFqefTNG1G8Rq3ibSWTk
+oML+/22IwASMDWu4XGsBo4bL0QwBmmZ7owrRwn/ItZhwK2mZ0yYQocIy5nz389hd
+8Nd12ambNvx5Q0OOMA1n1bytNF8aYCHl+VDWM1CtrRAn7fe0HFs=
+=Fr1T
+-----END PGP SIGNATURE-----

Added: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
 (added)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-helm.tgz.sha512
 Mon May 23 06:31:09 2022
@@ -0,0 +1 @@
+4883378930b780ed0f37b347215a4ec71ba46f15a3dd050f06914db2d264aa4d216ca84522c7aba4e195b38d8f4c11c8df01075df12c3c6c5fe7c27dd02ee29e
  flink-kubernetes-operator-1.0.0-helm.tgz

Added: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz
==
Binary file - no diff available.

Propchange: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz
--
svn:mime-type = application/octet-stream

Added: 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
==
--- 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
 (added)
+++ 
dev/flink/flink-kubernetes-operator-1.0.0-rc1/flink-kubernetes-operator-1.0.0-src.tgz.asc
 Mon May 23 06:31:09 2022
@@ -0,0 +1,16 @@
+-----BEGIN PGP SIGNATURE-----
+
+iQIzBAABCAAdFiEEL/KXe7v/3yg8b+fGowEAbzWR7iwFAmKLIlwACgkQowEAbzWR
+7iwHYw/+JPk95BmXcDmmoHICr+kIEfmAI9QNGNkuRuPqk/n0t5Up2CJVgA2jaDUq
+bP70nAaowattC9DHgXmPr3RUSB/OlAs3EkZYPaIevP14m/t7621kY+8zcRga9FWV
+ZyOnRLgQZS8kFhpDAWptRwVRZPCYqXT93z3ggOtmmwFOe9jxGpiLR2LTs4VOF37O
+3hxPY00Pm+E2N7RXaScV4LW92xo6yz5KNqTSNHmjZFw3FVZBm06A0d6xoy3/X/as
+RworYV+uKo9WTATsGb/kMVVgAFr6PHuzx4zzRVAhO7RJbh9o3Ma4jJ7Oy5Wwp2QN
+t+18jfZEQbMvYOkcS7omxuDI/lfhMLwEFRUoml2jS77EunPeXCP846Rb4NvmDR+h
+Ho11wc/3hvjTwhlzDbHG31/ASvNUbZaXIc4U+wKLxDxrI9kmLPXXWv8ucxJ0c21b
+aDJradO/L7c4BwxRH4CuxYq3vrP9nY0pbcQJ6tKm4YyB8HTu+vz4EkHEzlS3Le+k
+EiwjB4X5Sj3ghNc9yMMQbwZ2R+Hn4fa/zzpxRoHKpHYHUn3k5V/AlcVowOmhXfEH
+LdU3pGYuhWiaq71FKuCX4zGJqkg1d94GOGX1140aRwQAJ3s9UUF9uNq7vLKOVbZl
+LIGskaxS4C84xmn0klWcKvDbG4N7+O24BgRQ9VA8bBe3js0umX0=
+=3Fla
+-END PGP