This is an automated email from the ASF dual-hosted git repository.

martijnvisser pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/flink-web.git

commit 6e8fb6ece4ca9cfe5dc0adab93352d1654ed195c
Author: MartijnVisser <martijnvis...@apache.org>
AuthorDate: Tue Apr 19 10:11:53 2022 +0200

    Rebuild website
---
 content/img/flink_feature_radar_3.svg    |   4 +
 content/img/flink_feature_radar_zh_3.svg |   4 +
 content/q/gradle-quickstart.sh           |  71 ++++----
 content/roadmap.html                     | 173 +++++--------------
 content/zh/roadmap.html                  | 286 +++++++++++--------------------
 5 files changed, 186 insertions(+), 352 deletions(-)

diff --git a/content/img/flink_feature_radar_3.svg 
b/content/img/flink_feature_radar_3.svg
new file mode 100644
index 000000000..08e5e4ad8
--- /dev/null
+++ b/content/img/flink_feature_radar_3.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" 
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd";>
+<svg xmlns="http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; version="1.1" width="1461px" 
height="1611px" viewBox="-0.5 -0.5 1461 1611" content="&lt;mxfile 
host=&quot;app.diagrams.net&quot; modified=&quot;2022-04-19T07:50:28.330Z&quot; 
agent=&quot;5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 
(KHTML, like Gecko) Version/15.3 Safari/605.1.15&quot; 
etag=&quot;4SBsAjeUeQcoES03DVG8&quot; version=&quot;17.4.3&quot; 
type=&quot;device&quot;&gt;&lt;diagram id [...]
\ No newline at end of file
diff --git a/content/img/flink_feature_radar_zh_3.svg 
b/content/img/flink_feature_radar_zh_3.svg
new file mode 100644
index 000000000..08e5e4ad8
--- /dev/null
+++ b/content/img/flink_feature_radar_zh_3.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" 
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd";>
+<svg xmlns="http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; version="1.1" width="1461px" 
height="1611px" viewBox="-0.5 -0.5 1461 1611" content="&lt;mxfile 
host=&quot;app.diagrams.net&quot; modified=&quot;2022-04-19T07:50:28.330Z&quot; 
agent=&quot;5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 
(KHTML, like Gecko) Version/15.3 Safari/605.1.15&quot; 
etag=&quot;4SBsAjeUeQcoES03DVG8&quot; version=&quot;17.4.3&quot; 
type=&quot;device&quot;&gt;&lt;diagram id [...]
\ No newline at end of file
diff --git a/content/q/gradle-quickstart.sh b/content/q/gradle-quickstart.sh
index b977c69e2..bb17a40ab 100755
--- a/content/q/gradle-quickstart.sh
+++ b/content/q/gradle-quickstart.sh
@@ -42,7 +42,10 @@ defaultProjectName="quickstart"
 defaultOrganization="org.myorg.quickstart"
 defaultVersion="0.1-SNAPSHOT"
 defaultFlinkVersion="${1:-1.14.4}"
-defaultScalaBinaryVersion="${2:-2.11}"
+# flink-docs-master/docs/dev/datastream/project-configuration/#gradle
+# passes the Scala version prefixed with an underscore, e.g. _2.12
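+# ${2/_/} strips the first "_" from the second argument, so both "_2.12" and
+# "2.12" are accepted; when no argument is given, the 2.12 default below applies.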
+scalaBinaryVersionFromCmdArg="${2/_/}"
+defaultScalaBinaryVersion="${scalaBinaryVersionFromCmdArg:-2.12}"
 
 echo "This script creates a Flink project using Java and Gradle."
 
@@ -100,53 +103,49 @@ rootProject.name = '${projectName}'
 EOF
 
 cat > build.gradle <<EOF
-buildscript {
-    repositories {
-        jcenter() // this applies only to the Gradle 'Shadow' plugin
-    }
-    dependencies {
-        classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
-    }
-}
-
 plugins {
     id 'java'
     id 'application'
     // shadow plugin to produce fat JARs
-    id 'com.github.johnrengelman.shadow' version '2.0.4'
+    id 'com.github.johnrengelman.shadow' version '7.1.2'
 }
 
-
-// artifact properties
-group = '${organization}'
-version = '${version}'
-mainClassName = '${organization}.StreamingJob'
-description = """Flink Quickstart Job"""
-
 ext {
     javaVersion = '1.8'
     flinkVersion = '${flinkVersion}'
     scalaBinaryVersion = '${scalaBinaryVersion}'
-    slf4jVersion = '1.7.7'
-    log4jVersion = '1.2.17'
+    slf4jVersion = '1.7.32'
+    log4jVersion = '2.17.1'
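+    // Note: this is a lexicographic string comparison. It correctly separates
+    // 1.14.x from 1.15+, but it is not a general-purpose version comparison.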
+    flinkVersionNew = flinkVersion.toString().replace("-SNAPSHOT", "") >= 
"1.15"
 }
 
+// artifact properties
+group = '${organization}'
+version = '${version}'
+if (flinkVersionNew) {
+    mainClassName = '${organization}.DataStreamJob'
+} else {
+    mainClassName = '${organization}.StreamingJob'
+}
+description = """Flink Quickstart Job"""
 
 sourceCompatibility = javaVersion
 targetCompatibility = javaVersion
 tasks.withType(JavaCompile) {
-       options.encoding = 'UTF-8'
+    options.encoding = 'UTF-8'
 }
 
-applicationDefaultJvmArgs = ["-Dlog4j.configuration=log4j.properties"]
-
-task wrapper(type: Wrapper) {
-    gradleVersion = '3.1'
-}
+applicationDefaultJvmArgs = ["-Dlog4j.configurationFile=log4j2.properties"]
 
 // declare where to find the dependencies of your project
 repositories {
-    mavenCentral()$( if [[ "${flinkVersion}" == *-SNAPSHOT ]] ; then echo -e 
"\n    maven { url 
\"https://repository.apache.org/content/repositories/snapshots/\"; }" ; else 
echo ""; fi )
+    mavenCentral()
+    maven {
+        url "https://repository.apache.org/content/repositories/snapshots";
+        mavenContent {
+            snapshotsOnly()
+        }
+    }
 }
 
 // NOTE: We cannot use "compileOnly" or "shadow" configurations since then we 
could not run code
@@ -160,7 +159,7 @@ configurations {
     flinkShadowJar.exclude group: 'org.apache.flink', module: 'force-shading'
     flinkShadowJar.exclude group: 'com.google.code.findbugs', module: 'jsr305'
     flinkShadowJar.exclude group: 'org.slf4j'
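+    // Logging is provided by the Flink distribution, so it is excluded from
+    // the shadow (fat) JAR as well: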
-    flinkShadowJar.exclude group: 'log4j'
+    flinkShadowJar.exclude group: 'org.apache.logging.log4j'
 }
 
 // declare the dependencies for your production and test code
@@ -169,17 +168,23 @@ dependencies {
     // Compile-time dependencies that should NOT be part of the
     // shadow jar and are provided in the lib folder of Flink
     // --------------------------------------------------------------
-    compile "org.apache.flink:flink-java:\${flinkVersion}"
-    compile 
"org.apache.flink:flink-streaming-java_\${scalaBinaryVersion}:\${flinkVersion}"
+    if (flinkVersionNew) {
+        implementation "org.apache.flink:flink-streaming-java:\${flinkVersion}"
+        implementation "org.apache.flink:flink-clients:\${flinkVersion}"
+    } else {
+        implementation 
"org.apache.flink:flink-streaming-java_\${scalaBinaryVersion}:\${flinkVersion}"
+        implementation 
"org.apache.flink:flink-clients_\${scalaBinaryVersion}:\${flinkVersion}"
+    }
 
     // --------------------------------------------------------------
     // Dependencies that should be part of the shadow jar, e.g.
     // connectors. These must be in the flinkShadowJar configuration!
     // --------------------------------------------------------------
-    //flinkShadowJar 
"org.apache.flink:flink-connector-kafka-0.11_\${scalaBinaryVersion}:\${flinkVersion}"
+    //flinkShadowJar "org.apache.flink:flink-connector-kafka:\${flinkVersion}"
 
-    compile "log4j:log4j:\${log4jVersion}"
-    compile "org.slf4j:slf4j-log4j12:\${slf4jVersion}"
+    runtimeOnly "org.apache.logging.log4j:log4j-slf4j-impl:\${log4jVersion}"
+    runtimeOnly "org.apache.logging.log4j:log4j-api:\${log4jVersion}"
+    runtimeOnly "org.apache.logging.log4j:log4j-core:\${log4jVersion}"
 
     // Add test dependencies here.
     // testCompile "junit:junit:4.12"
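
For reference, the generated build file now expects a DataStreamJob entry point
for Flink 1.15+ (see mainClassName above). A minimal sketch of such a class,
assuming the script's default organization org.myorg.quickstart; the pipeline
itself is illustrative only:

    package org.myorg.quickstart;

    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class DataStreamJob {
        public static void main(String[] args) throws Exception {
            // Matches mainClassName in the generated build.gradle.
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // Any source will do; fromElements keeps the sketch self-contained.
            DataStream<String> stream = env.fromElements("hello", "flink");
            stream.print();

            env.execute("Flink Quickstart Job");
        }
    }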
diff --git a/content/roadmap.html b/content/roadmap.html
index 1b7e7786c..7dd8314b8 100644
--- a/content/roadmap.html
+++ b/content/roadmap.html
@@ -258,18 +258,15 @@ under the License.
   <li><a 
href="#unified-analytics-where-batch-and-streaming-come-together-sql-and-beyond"
 
id="markdown-toc-unified-analytics-where-batch-and-streaming-come-together-sql-and-beyond">Unified
 Analytics: Where Batch and Streaming come Together; SQL and beyond.</a>    <ul>
       <li><a href="#a-unified-sql-platform" 
id="markdown-toc-a-unified-sql-platform">A unified SQL Platform</a></li>
       <li><a href="#deep-batch--streaming-unification-for-the-datastream-api" 
id="markdown-toc-deep-batch--streaming-unification-for-the-datastream-api">Deep 
Batch / Streaming Unification for the DataStream API</a></li>
-      <li><a href="#subsuming-dataset-with-datastream-and-table-api" 
id="markdown-toc-subsuming-dataset-with-datastream-and-table-api">Subsuming 
DataSet with DataStream and Table API</a></li>
     </ul>
   </li>
   <li><a href="#applications-vs-clusters-flink-as-a-library" 
id="markdown-toc-applications-vs-clusters-flink-as-a-library">Applications vs. 
Clusters; “Flink as a Library”</a></li>
   <li><a href="#performance" id="markdown-toc-performance">Performance</a>    
<ul>
       <li><a href="#faster-checkpoints-and-recovery" 
id="markdown-toc-faster-checkpoints-and-recovery">Faster Checkpoints and 
Recovery</a></li>
-      <li><a href="#large-scale-batch-applications" 
id="markdown-toc-large-scale-batch-applications">Large Scale Batch 
Applications</a></li>
     </ul>
   </li>
-  <li><a href="#python-apis" id="markdown-toc-python-apis">Python APIs</a></li>
+  <li><a href="#apache-flink-as-part-of-an-ever-evolving-data-ecosystem" 
id="markdown-toc-apache-flink-as-part-of-an-ever-evolving-data-ecosystem">Apache
 Flink as part of an ever evolving data ecosystem</a></li>
   <li><a href="#documentation" 
id="markdown-toc-documentation">Documentation</a></li>
-  <li><a href="#miscellaneous-operational-tools" 
id="markdown-toc-miscellaneous-operational-tools">Miscellaneous Operational 
Tools</a></li>
   <li><a href="#stateful-functions" 
id="markdown-toc-stateful-functions">Stateful Functions</a></li>
 </ul>
 
@@ -298,7 +295,7 @@ are approaching end-of-life. For questions, please contact 
the developer mailing
 <a 
href="&#109;&#097;&#105;&#108;&#116;&#111;:&#100;&#101;&#118;&#064;&#102;&#108;&#105;&#110;&#107;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;">&#100;&#101;&#118;&#064;&#102;&#108;&#105;&#110;&#107;&#046;&#097;&#112;&#097;&#099;&#104;&#101;&#046;&#111;&#114;&#103;</a></p>
 
 <div class="row front-graphic">
-  <img src="/img/flink_feature_radar_2.svg" width="700px" />
+  <img src="/img/flink_feature_radar_3.svg" width="700px" />
 </div>
 
 <h2 id="feature-stages">Feature Stages</h2>
@@ -335,39 +332,34 @@ platform, and is continuing to do so.</p>
 and as continuous queries. Flink already contains an efficient unified query 
engine, and a wide set of
 integrations. With user feedback, those are continuously improved.</p>
 
-<p><strong>More Connector and Change Data Capture Support</strong></p>
+<p><strong>Going Beyond a SQL Stream/Batch Processing Engine</strong></p>
 
 <ul>
-  <li>Change-Data-Capture: Capturing a stream of data changes, directly from 
databases, by attaching to the
-transaction log. The community is adding more CDC intrgrations.
-    <ul>
-      <li>External CDC connectors: <a 
href="https://flink-packages.org/packages/cdc-connectors";>https://flink-packages.org/packages/cdc-connectors</a></li>
-      <li>Background: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=147427289";>FLIP-105</a>
-(CDC support for SQL) and <a href="https://debezium.io/";>Debezium</a>.</li>
-    </ul>
-  </li>
-  <li>Data Lake Connectors: Unified streaming &amp; batch is a powerful value 
proposition for Data Lakes: supporting
-same APIs, semantics, and engine for streaming real-time processing and batch 
processing of historic data.
-The community is adding deeper integrations with various Data Lake systems:
-    <ul>
-      <li><a href="https://iceberg.apache.org/";>Apache Iceberg</a>: <a 
href="https://iceberg.apache.org/flink/";>https://iceberg.apache.org/flink/</a></li>
-      <li><a href="https://hudi.apache.org/";>Apache Hudi</a>: <a 
href="https://hudi.apache.org/blog/apache-hudi-meets-apache-flink/";>https://hudi.apache.org/blog/apache-hudi-meets-apache-flink/</a></li>
-      <li><a href="https://pinot.apache.org/";>Apache Pinot</a>: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=177045634";>FLIP-166</a></li>
-    </ul>
-  </li>
+  <li>To extend the capabilities of a pure stream processor and make Flink ready
+for future use cases,
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-188%3A+Introduce+Built-in+Dynamic+Table+Storage">FLIP-188</a>
+has been announced, adding built-in dynamic table storage.</li>
+  <li>The experience of updating Flink SQL based jobs has been rather cumbersome,
+as an update could lead to a new job graph, making restoring from
+savepoints/checkpoints impossible.
+<a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=191336489&amp;src=contextnavpagetreemode">FLIP-190</a>,
+which has already shipped as an MVP, targets this problem.</li>
 </ul>
 
 <p><strong>Platform Infrastructure</strong></p>
 
 <ul>
-  <li>To simplify the building of production SQL platforms with Flink, we are 
improving the SQL client and are
-working on SQL gateway components that interface between client and cluster: 
<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-163%3A+SQL+Client+Improvements";>FLIP-163</a></li>
+  <li>Following <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-163%3A+SQL+Client+Improvements">FLIP-163</a>,
+the community is again working on a set of SQL Client usability improvements
+(<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-189%3A+SQL+Client+Usability+Improvements">FLIP-189</a>)
+aimed at improving the user experience when using the SQL client.</li>
 </ul>
 
 <p><strong>Support for Common Languages, Formats, Catalogs</strong></p>
 
 <ul>
-  <li>Hive Query Compatibility: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-152%3A+Hive+Query+Syntax+Compatibility";>FLIP-152</a></li>
+  <li>With <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-216%3A++Introduce+pluggable+dialect+and++decouple+Hive+connector">FLIP-216</a>
+there is now an initiative to introduce pluggable dialects, with the Hive
+connector as the first example. Bundling so many dependencies to make dialects
+work has led to overhead for contributors and users.</li>
 </ul>
 
 <p>Flink has a broad SQL coverage for batch (full TPC-DS support) and a 
state-of-the-art set of supported
@@ -408,59 +400,6 @@ API in the process. See <a 
href="https://cwiki.apache.org/confluence/display/FLI
   </li>
 </ul>
 
-<p><strong>DataStream Batch Execution</strong></p>
-
-<ul>
-  <li>
-    <p>Flink is adding a <em>batch execution mode</em> for bounded DataStream 
programs. This gives users faster and simpler
-execution and recovery of their bounded streaming applications; users do not 
need to worry about watermarks and
-state sizes in this execution mode: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-140%3A+Introduce+batch-style+execution+for+bounded+keyed+streams";>FLIP-140</a></p>
-
-    <p>The core batch execution mode is implemented with <a 
href="https://flink.apache.org/news/2020/12/10/release-1.12.0.html#batch-execution-mode-in-the-datastream-api";>great
 results</a>;
-there are ongoing improvements around aspects like broadcast state and 
processing-time-timers.
-This mode requires the new unified sources and sinks that are mentioned above, 
so it is limited
-to the connectors that have been ported to those new APIs.</p>
-  </li>
-</ul>
-
-<p><strong>Mixing bounded/unbounded streams, and batch/streaming 
execution</strong></p>
-
-<ul>
-  <li>
-    <p>Support checkpointing when some tasks finished &amp; Bounded stream 
programs shut down with a final
-checkpoint: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-147%3A+Support+Checkpoints+After+Tasks+Finished";>FLIP-147</a></p>
-  </li>
-  <li>
-    <p>There are initial discussions and designs about jobs with mixed 
batch/streaming execution, so stay tuned for more
-news in that area.</p>
-  </li>
-</ul>
-
-<h2 id="subsuming-dataset-with-datastream-and-table-api">Subsuming DataSet 
with DataStream and Table API</h2>
-
-<p>We want to eventually drop the legacy Batch-only DataSet API, have 
batch-and stream processing unified
-throughout the entire system.</p>
-
-<p>Overall Discussion: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=158866741";>FLIP-131</a></p>
-
-<p>The <em>DataStream API</em> supports batch-execution to efficiently execute 
streaming programs on historic data
-(see above). Takes over that set of use cases.</p>
-
-<p>The <em>Table API</em> should become the default API for batch-only 
applications.</p>
-
-<ul>
-  <li>Add more operations to Table API, so support common data manipulation 
tasks more
-   easily: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-155%3A+Introduce+a+few+convenient+operations+in+Table+API";>FLIP-155</a></li>
-  <li>Make Source and Sink definitions easier in the Table API.</li>
-</ul>
-
-<p>Improve the <em>interplay between the Table API and the DataStream API</em> 
to allow switching from Table API to
-DataStream API when more control over the data types and operations is 
necessary.</p>
-
-<ul>
-  <li>Interoperability between DataStream and Table APIs: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-136%3A++Improve+interoperability+between+DataStream+and+Table+API";>FLIP-136</a></li>
-</ul>
-
 <hr />
 
 <h1 id="applications-vs-clusters-flink-as-a-library">Applications vs. 
Clusters; “Flink as a Library”</h1>
@@ -471,19 +410,9 @@ job as a self contained application.</p>
 
 <p>For example as a simple Kubernetes deployment; deployed and scaled like a 
regular application without extra workflows.</p>
 
-<p>Deploy Flink jobs as self-contained Applications works for all deployment 
targets since Flink 1.11.0
-(<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-85+Flink+Application+Mode";>FLIP-85</a>).</p>
-
 <ul>
-  <li>
-    <p>Reactive Scaling lets Flink applications change their parallelism in 
response to growing and shrinking
-worker pools, and makes Flink compatibel with standard auto-scalers:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-159%3A+Reactive+Mode";>FLIP-159</a></p>
-  </li>
-  <li>
-    <p>Kubernetes-based HA-services let Flink applications run on Kubernetes 
without requiring a ZooKeeper dependency:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-144%3A+Native+Kubernetes+HA+for+Flink";>FLIP-144</a></p>
-  </li>
+  <li>There is currently a Kubernetes Operator being developed by the community.
+See <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-212%3A+Introduce+Flink+Kubernetes+Operator">FLIP-212</a>.</li>
 </ul>
 
 <hr />
@@ -499,45 +428,31 @@ Checkpoints and recovery are stable and have been a 
reliable workhorse for years
 trying to make it faster, more predictable, and to remove some confusions and 
inflexibility in some areas.</p>
 
 <ul>
-  <li>Unaligned Checkpoints, to make checkpoints progress faster when 
applications cause backpressure:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-76%3A+Unaligned+Checkpoints";>FLIP-76</a>,
 available
-since Flink 1.12.2.</li>
-  <li>Log-based Checkpoints, for very frequent incremental checkpointing:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-158%3A+Generalized+incremental+checkpoints";>FLIP-158</a></li>
-</ul>
-
-<h2 id="large-scale-batch-applications">Large Scale Batch Applications</h2>
-
-<p>The community is working on making large scale batch execution (parallelism 
in the order of 10,000s)
-simpler (less configuration tuning required) and more performant.</p>
-
-<ul>
-  <li>
-    <p>Introduce a more scalable batch shuffle. First parts of this have been 
merged, and ongoing efforts are
-to make the memory footprint (JVM direct memory) more predictable, see
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-148%3A+Introduce+Sort-Merge+Based+Blocking+Shuffle+to+Flink";>FLIP-148</a></p>
-
-    <ul>
-      <li><a 
href="https://issues.apache.org/jira/browse/FLINK-20740";>FLINK-20740</a></li>
-      <li><a 
href="https://issues.apache.org/jira/browse/FLINK-19938";>FLINK-19938</a></li>
-    </ul>
-  </li>
-  <li>
-    <p>Make scheduler faster for higher parallelism: <a 
href="https://issues.apache.org/jira/browse/FLINK-21110";>FLINK-21110</a></p>
-  </li>
+  <li><a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-183%3A+Dynamic+buffer+size+adjustment">FLIP-183</a>
+targets the size of checkpoints by debloating the network buffers. A first
+beta is already available.</li>
+  <li>With <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-151%3A+Incremental+snapshots+for+heap-based+state+backend">FLIP-151</a>
+there is an ongoing effort to implement incremental snapshots for the
+heap-based state backend.</li>
 </ul>
 
 <hr />
 
-<h1 id="python-apis">Python APIs</h1>
+<h1 id="apache-flink-as-part-of-an-ever-evolving-data-ecosystem">Apache Flink 
as part of an ever evolving data ecosystem</h1>
 
-<p>Most functionalities in the Java Table APIs and DataStream APIs are already 
supported by the Python APIs. 
-The community is continuously working on improvements such as improving the 
checkpoint strategy for Python UDF execution
-(<a href="https://issues.apache.org/jira/browse/FLINK-18235";>FLINK-18235</a>), 
introducing more connectors support in both the Python DataStream API 
-and Python Table API so that the Python API can be used in for production 
implementations.</p>
+<p>There is almost no use case in which Apache Flink is used on its own. It
+has established itself as part of many data-related reference architectures;
+in fact, you’ll find the squirrel logo in several of them. The community has
+added a lot of connectors and formats. With the already mentioned
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-27%3A+Refactor+Source+Interface">FLIP-27</a> and
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-143%3A+Unified+Sink+API">FLIP-143</a>,
+a new default for connectors has been established.</p>
 
-<p>Stateful transformation functions for the Python DataStream API:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-153%3A+Support+state+access+in+Python+DataStream+API";>FLIP-153</a></p>
+<ul>
+  <li>There are efforts to revise the formats API with
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-219%3A+Revised+Format+API">FLIP-219</a>.</li>
+  <li>There is ongoing work on new connectors
+(e.g., <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=177045634">Pinot</a>).</li>
+  <li>Connectors will be hosted in an external repository going forward; see the
+<a href="https://lists.apache.org/thread/8k1xonqt7hn0xldbky1cxfx3fzh6sj7h">ML thread</a>.</li>
+</ul>
 
 <hr />
 
@@ -554,16 +469,6 @@ of the documentation.</p>
 
 <hr />
 
-<h1 id="miscellaneous-operational-tools">Miscellaneous Operational Tools</h1>
-
-<ul>
-  <li>Allow switching state backends with savepoints: <a 
href="https://issues.apache.org/jira/browse/FLINK-20976";>FLINK-20976</a></li>
-  <li>Support for Savepoints with more properties, like incremental 
savepoints, etc.:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-47%3A+Checkpoints+vs.+Savepoints";>FLIP-47</a></li>
-</ul>
-
-<hr />
-
 <h1 id="stateful-functions">Stateful Functions</h1>
 
 <p>The Stateful Functions subproject has its own roadmap published under <a 
href="https://statefun.io/";>statefun.io</a>.</p>
diff --git a/content/zh/roadmap.html b/content/zh/roadmap.html
index a54a1a0be..99e378f49 100644
--- a/content/zh/roadmap.html
+++ b/content/zh/roadmap.html
@@ -253,21 +253,18 @@ under the License.
       <li><a href="#section-1" id="markdown-toc-section-1">功能阶段</a></li>
     </ul>
   </li>
-  <li><a href="#sql" id="markdown-toc-sql">一体化分析:流批一体,SQL及其他</a>    <ul>
-      <li><a href="#sql-1" id="markdown-toc-sql-1">统一的SQL平台</a></li>
-      <li><a href="#datastream-api-" 
id="markdown-toc-datastream-api-">DataStream API 流批一体深度融合</a></li>
-      <li><a href="#datastream--table-api--dataset" 
id="markdown-toc-datastream--table-api--dataset">使用 DataStream &amp; Table API 
取代 DataSet</a></li>
+  <li><a 
href="#unified-analytics-where-batch-and-streaming-come-together-sql-and-beyond"
 
id="markdown-toc-unified-analytics-where-batch-and-streaming-come-together-sql-and-beyond">Unified
 Analytics: Where Batch and Streaming come Together; SQL and beyond.</a>    <ul>
+      <li><a href="#a-unified-sql-platform" 
id="markdown-toc-a-unified-sql-platform">A unified SQL Platform</a></li>
+      <li><a href="#deep-batch--streaming-unification-for-the-datastream-api" 
id="markdown-toc-deep-batch--streaming-unification-for-the-datastream-api">Deep 
Batch / Streaming Unification for the DataStream API</a></li>
     </ul>
   </li>
   <li><a href="#applications-vs-clusters-flink-as-a-library" 
id="markdown-toc-applications-vs-clusters-flink-as-a-library">Applications vs. 
Clusters; “Flink as a Library”</a></li>
-  <li><a href="#section-2" id="markdown-toc-section-2">性能</a>    <ul>
+  <li><a href="#performance" id="markdown-toc-performance">Performance</a>    
<ul>
       <li><a href="#faster-checkpoints-and-recovery" 
id="markdown-toc-faster-checkpoints-and-recovery">Faster Checkpoints and 
Recovery</a></li>
-      <li><a href="#section-3" id="markdown-toc-section-3">大规模批作业</a></li>
     </ul>
   </li>
-  <li><a href="#python-apis" id="markdown-toc-python-apis">Python APIs</a></li>
-  <li><a href="#section-4" id="markdown-toc-section-4">文档</a></li>
-  <li><a href="#section-5" id="markdown-toc-section-5">操作工具</a></li>
+  <li><a href="#apache-flink-as-part-of-an-ever-evolving-data-ecosystem" 
id="markdown-toc-apache-flink-as-part-of-an-ever-evolving-data-ecosystem">Apache
 Flink as part of an ever evolving data ecosystem</a></li>
+  <li><a href="#documentation" 
id="markdown-toc-documentation">Documentation</a></li>
   <li><a href="#stateful-functions" 
id="markdown-toc-stateful-functions">Stateful Functions</a></li>
 </ul>
 
@@ -295,7 +292,7 @@ under the License.
 。</p>
 
 <div class="row front-graphic">
-  <img src="/img/flink_feature_radar_zh_2.svg" width="700px" />
+  <img src="/img/flink_feature_radar_zh_3.svg" width="700px" />
 </div>
 
 <h2 id="section-1">功能阶段</h2>
@@ -311,144 +308,92 @@ under the License.
 
 <hr />
 
-<h1 id="sql">一体化分析:流批一体,SQL及其他</h1>
-<p>Flink的内核是流数据处理系统,Flink将批处理作为流的特例,用流的方式来执行批处理。
-Flink作为一个流式引擎,不仅能够高效执行批处理,更重要的是通过高效处理有限流的方式,
-打开了无缝流批一体处理之门。</p>
+<h1 
id="unified-analytics-where-batch-and-streaming-come-together-sql-and-beyond">Unified
 Analytics: Where Batch and Streaming come Together; SQL and beyond.</h1>
 
-<p>流批一体升级了流数据范例:它可以保证实时和离线应用语义的一致性。
-此外,有时流式处理的作业也需要离线(有限流)处理作为补充,例如,
-在出现错误或出现数据质量问题时需要重新处理数据,
-或者有的情况下启动新的作业但需要历史数据作为引导。统一的API和系统使得此类操作变得很容易。</p>
+<p>Flink is a streaming data system at its core, one that executes “batch as a special case of streaming”.
+Efficient execution of batch jobs is powerful in its own right; but even more 
so, batch processing
+capabilities (efficient processing of bounded streams) open the way for a 
seamless unification of
+batch and streaming applications.</p>
 
-<h2 id="sql-1">统一的SQL平台</h2>
+<p>Unified streaming/batch up-levels the streaming data paradigm: It gives 
users consistent semantics across
+their real-time and lag-time applications. Furthermore, streaming applications 
often need to be complemented
+by batch (bounded stream) processing, for example when reprocessing data after 
bugs or data quality issues,
+or when bootstrapping new applications. A unified API and system make this 
much easier.</p>
 
-<p>Flink社区一直致力于建设基于Flink的统一的流批一体SQL分析平台,并将持续在这个方向上努力。
-SQL具有非常强的跨流批的语义,并允许用户使用相同的SQL语句对即时查询(ad-hoc query)和
-连续查询(continuous query)进行分析。Flink拥有高效的统一查询引擎,以及基于此的一系列整合统一。
-根据用户反馈,我们会持续改善这些整合统一的使用体验。</p>
+<h2 id="a-unified-sql-platform">A unified SQL Platform</h2>
 
-<p><strong>CDC &amp; Connectors</strong></p>
+<p>The community has been building Flink into a powerful basis for a unified 
(batch and streaming) SQL analytics
+platform, and is continuing to do so.</p>
 
-<ul>
-  <li>CDC(Change-Data-Capture): 通过直接连接到数据库的日志来捕获数据变更。Flink社区会加强和CDC的整合
-    <ul>
-      <li>CDC connectors: 
-<a 
href="https://flink-packages.org/packages/cdc-connectors";>https://flink-packages.org/packages/cdc-connectors</a></li>
-      <li>背景: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=147427289";>FLIP-105</a>
-(CDC对SQL的支持) &amp; <a href="https://debezium.io/";>Debezium</a></li>
-    </ul>
-  </li>
-  <li>数据湖Connectors: 流批一体对数据湖有很大价值,包括支持流(处理当前数据)和批(处理历史数据)的相同API,相同语义,以及相同引擎。
-目前,Flink社区正在和各种数据湖系统进行深度融合,包括:
-    <ul>
-      <li><a href="https://iceberg.apache.org/";>Apache Iceberg</a>: 
-<a 
href="https://iceberg.apache.org/flink/";>https://iceberg.apache.org/flink/</a></li>
-      <li><a href="https://hudi.apache.org/";>Apache Hudi</a>: 
-<a 
href="https://hudi.apache.org/blog/apache-hudi-meets-apache-flink/";>https://hudi.apache.org/blog/apache-hudi-meets-apache-flink/</a></li>
-      <li><a href="https://pinot.apache.org/";>Apache Pinot</a>: 
-<a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=177045634";>FLIP-166</a></li>
-    </ul>
-  </li>
-</ul>
-
-<p><strong>SQL 平台基建</strong></p>
+<p>SQL has very strong cross-batch-streaming semantics, allowing users to use 
the same queries for ad-hoc analytics
+and as continuous queries. Flink already contains an efficient unified query 
engine, and a wide set of
+integrations. With user feedback, those are continuously improved.</p>
 
-<ul>
-  <li>为了简化Flink SQL的生产实践,我们正在改进SQL客户端以及SQL Gateway中客户端和群集之间与交互相关的组件:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-163%3A+SQL+Client+Improvements";>FLIP-163</a></li>
-</ul>
-
-<p><strong>通用语言,格式(Formats),目录(Catalogs)</strong></p>
+<p><strong>Going Beyond a SQL Stream/Batch Processing Engine</strong></p>
 
 <ul>
-  <li>Hive Query兼容性支持: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-152%3A+Hive+Query+Syntax+Compatibility";>FLIP-152</a></li>
+  <li>To extend the capabilities of a pure stream processor and make Flink ready
+for future use cases,
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-188%3A+Introduce+Built-in+Dynamic+Table+Storage">FLIP-188</a>
+has been announced, adding built-in dynamic table storage.</li>
+  <li>The experience of updating Flink SQL based jobs has been rather cumbersome,
+as an update could lead to a new job graph, making restoring from
+savepoints/checkpoints impossible.
+<a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=191336489&amp;src=contextnavpagetreemode">FLIP-190</a>,
+which has already shipped as an MVP, targets this problem.</li>
 </ul>
 
-<p>Flink SQL具备广泛的批处理覆盖(全面的TPC-DS支持)和最先进的流处理支持。我们也一直会努力增添更多的SQL功能和算子。</p>
-
-<h2 id="datastream-api-">DataStream API 流批一体深度融合</h2>
-
-<p><em>DataStream API</em> 是 Flink 的<em>物理层</em> API, 
针对需要明确地控制数据类型,数据流,状态以及时间的应用。
-DataStream API 也在不断丰富演化以支持在有限数据上的高效地批处理。
-DataStream API 使用和流式一样的 dataflow 来执行批处理,并使用一样的算子。
-这样一来,用户使用 DataStream API 表达流和批可以保持相同级别的控制。
-我们对DataStream API融合的最终目标是可以混合并自由切换流批执行,提供流批间无缝切换体验。</p>
-
-<p><strong>统一 Sources &amp; Sinks</strong></p>
+<p><strong>Platform Infrastructure</strong></p>
 
 <ul>
-  <li>
-    <p>第一代 source 的 API 和实现要么只能用于 DataStream API 里面的流处理
-(<a 
href="https://github.com/apache/flink/blob/master/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java";>SourceFunction</a>)
-;要么只能用于 DataSet API 里面的批处理 
-(<a 
href="https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/InputFormat.java";>InputFormat</a>)
 。</p>
-
-    <p>因此,我们致力于构造对流和批都适用的 sources,能让用户在这两种模式下有一致的使用体验,
-并可以很容易地在流处理和批处理之间切换,执行无限流和有限流作业。
-新的 Source API 的接口已经可以使用。我们会将更多的 source connectors 迁移到这个新的模型,详见
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-27%3A+Refactor+Source+Interface";>FLIP-27</a>.</p>
-  </li>
-  <li>
-    <p>和 source 类似,原先的 sink 及其 API 也是分别针对流
-(<a 
href="https://github.com/apache/flink/blob/master/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/SinkFunction.java";>SinkFunction</a>)
-和批 (<a 
href="https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormat.java";>OutputFormat</a>)
-设计的。</p>
-
-    <p>为此,我们引入了新的 sink API,可以流批一致的解决结果输出和提交 (<em>Transactions</em>) 的问题。
-新的 sink API 的第一版已经出炉,并在不断改进中,详见 
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-143%3A+Unified+Sink+API";>FLIP-143</a>
 。</p>
-  </li>
+  <li>Following <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-163%3A+SQL+Client+Improvements">FLIP-163</a>,
+the community is again working on a set of SQL Client usability improvements
+(<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-189%3A+SQL+Client+Usability+Improvements">FLIP-189</a>)
+aimed at improving the user experience when using the SQL client.</li>
 </ul>
 
-<p><strong>DataStream 的批处理执行模式</strong></p>
+<p><strong>Support for Common Languages, Formats, Catalogs</strong></p>
 
 <ul>
-  <li>
-    <p>Flink 在 DataStream 上为有限流新增加了<em>批执行模式</em>,这可以使得用户更简单快速的执行和恢复有限流作业。 
-在有限流的批执行模式下,用户无需担心 watermarks 和状态大小的问题:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-140%3A+Introduce+batch-style+execution+for+bounded+keyed+streams";>FLIP-140</a>
 。</p>
-
-    <p>批执行模式的核心实现已有 <a 
href="https://flink.apache.org/news/2020/12/10/release-1.12.0.html#batch-execution-mode-in-the-datastream-api";>很好的结果</a>;
-其他部分也在持续改进中,包括 broadcast state 和 processing-time-timers。
-值得注意的是,此模式的实现基于上面提到的新的的 source 和 sink,因此它只能支持已经使用新 API 的 connectors。</p>
-  </li>
+  <li>With <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-216%3A++Introduce+pluggable+dialect+and++decouple+Hive+connector">FLIP-216</a>
+there is now an initiative to introduce pluggable dialects, with the Hive
+connector as the first example. Bundling so many dependencies to make dialects
+work has led to overhead for contributors and users.</li>
 </ul>
 
-<p><strong>混合 有限流/无限流 &amp; 批执行/流执行</strong></p>
-
-<ul>
-  <li>
-    <p>支持在部分 task 结束后还可以做 checkpoint &amp; 支持有限流作业在结束的时候做最后一次 checkpoint :
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-147%3A+Support+Checkpoints+After+Tasks+Finished";>FLIP-147</a></p>
-  </li>
-  <li>
-    <p>对于混合/切换流和批的执行,我们有一些初步的设计和讨论,敬请关注。</p>
-  </li>
-</ul>
+<p>Flink has a broad SQL coverage for batch (full TPC-DS support) and a 
state-of-the-art set of supported
+operations in streaming. There is continuous effort to add more functions and 
cover more SQL operations.</p>
 
-<h2 id="datastream--table-api--dataset">使用 DataStream &amp; Table API 取代 
DataSet</h2>
+<h2 id="deep-batch--streaming-unification-for-the-datastream-api">Deep Batch / 
Streaming Unification for the DataStream API</h2>
 
-<p>我们希望最终能弃用只支持批式处理的 DataSet API,从而使用统一的流批处理贯穿整个系统。
-整体的讨论在这里: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=158866741";>FLIP-131</a>
 。</p>
+<p>The <em>DataStream API</em> is Flink’s <em>physical</em> API, for use cases 
where users need very explicit control over data
+types, streams, state, and time. This API is evolving to support efficient 
batch execution on bounded data.</p>
 
-<p><em>DataStream API</em> 可以高效的用批的方式来执行需要处理历史数据的流作业(如上所述)。</p>
+<p>DataStream API executes the same dataflow shape in batch as in streaming, 
keeping the same operators.
+That way users keep the same level of control over the dataflow, and our goal 
is to mix and switch between
+batch/streaming execution in the future to make it a seamless experience.</p>
 
-<p><em>Table API</em> 应该是所有单批作业所使用的默认的 API 。</p>
+<p><strong>Unified Sources and Sinks</strong></p>
 
 <ul>
   <li>
-    <p>Table API 增加更多操作,以方便支持常见的数据操作任务
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-155%3A+Introduce+a+few+convenient+operations+in+Table+API";>FLIP-155</a></p>
-  </li>
-  <li>
-    <p>在 Table API 中,使 Source 和 Sink 更容易定义使用</p>
+    <p>The first APIs and implementations of sources were specific to either 
streaming programs in the DataStream API
+(<a 
href="https://github.com/apache/flink/blob/master/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/source/SourceFunction.java";>SourceFunction</a>),
+or to batch programs in the DataSet API (<a 
href="https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/InputFormat.java";>InputFormat</a>).</p>
+
+    <p>In this effort, we are creating sources that work across batch and 
streaming execution. The aim is to give
+users a consistent experience across both modes, and to allow them to easily 
switch between streaming and batch
+execution for their unbounded and bounded streaming applications.
+The interface for this New Source API is done and available, and we are 
working on migrating more source connectors
+to this new model, see <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-27%3A+Refactor+Source+Interface";>FLIP-27</a>.</p>
   </li>
   <li>
-    <p>DataStream API &amp; Table API 互通性: 
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-136%3A++Improve+interoperability+between+DataStream+and+Table+API";>FLIP-136</a></p>
+    <p>Similar to the sources, the original sink APIs are also specific to 
streaming
+(<a 
href="https://github.com/apache/flink/blob/master/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/SinkFunction.java";>SinkFunction</a>)
+and batch (<a 
href="https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/common/io/OutputFormat.java";>OutputFormat</a>)
+APIs and execution.</p>
 
-    <p>提升 Table API 和 DataStream API 之间互通的能力。当需要更多对数据类型和操作控制的时候,允许从 Table API 
切换到 DataStream API 。</p>
+    <p>We have introduced a new API for sinks that consistently handles result 
writing and committing (<em>Transactions</em>)
+across batch and streaming. The first iteration of the API exists, and we are 
porting sinks and refining the
+API in the process. See <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-143%3A+Unified+Sink+API";>FLIP-143</a>.</p>
   </li>
 </ul>
 
@@ -456,103 +401,74 @@ DataStream API 使用和流式一样的 dataflow 来执行批处理,并使用
 
 <h1 id="applications-vs-clusters-flink-as-a-library">Applications vs. 
Clusters; “Flink as a Library”</h1>
 
-<p>这个部分的工作主要是为了使部署(长时间运行的流式)Flink 作业变得更为自然简单。
-我们希望部署一个流式作业就像启动一个独立的应用(Applications)一样简单:
-不需要首先启动集群(Clusters),再向该集群提交作业。</p>
+<p>The goal of these efforts is to make it feel natural to deploy (long 
running streaming) Flink applications.
+Instead of starting a cluster and submitting a job to that cluster, these 
efforts support deploying a streaming
+job as a self contained application.</p>
 
-<p>例如,我们期望 Flink 作业可以作为简单的Kubernetes部署,能像普通应用程序一样可以进行常规部署和扩展,而无需额外的工作流程。
-从 Flink 1.11.0 开始,Flink 支持将 Flink 作业部署为独立的应用程序
-(<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-85+Flink+Application+Mode";>FLIP-85</a>)
 。</p>
+<p>For example as a simple Kubernetes deployment; deployed and scaled like a 
regular application without extra workflows.</p>
 
 <ul>
-  <li>
-    <p>响应式缩放功能(Reactive Scaling)可以使 Flink 作业根据资源池的增长和收缩情况更改并行度。 
-这样可以自然地使 Flink 与标准自动缩放(atuo Scaler)兼容。
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-159%3A+Reactive+Mode";>FLIP-159</a></p>
-  </li>
-  <li>
-    <p>基于 Kubernetes 的高可用性(HA)服务使 Flink 作业在 Kubernetes 上运行时无需依赖ZooKeeper:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-144%3A+Native+Kubernetes+HA+for+Flink";>FLIP-144</a></p>
-  </li>
+  <li>There is currently a Kubernetes Operator being developed by the community.
+See <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-212%3A+Introduce+Flink+Kubernetes+Operator">FLIP-212</a>.</li>
 </ul>
 
 <hr />
 
-<h1 id="section-2">性能</h1>
+<h1 id="performance">Performance</h1>
 
-<p>我们会持续不断的提高性能和容错恢复速度。</p>
+<p>Continuous work to keep improving performance and recovery speed.</p>
 
 <h2 id="faster-checkpoints-and-recovery">Faster Checkpoints and Recovery</h2>
 
-<p>Flink 社区正在致力于提升做检查点(checkpointing)和容错恢复(recovery)的速度。
-Flink的容错机制多年来运行非常稳定,但是我们还是想让整个容错过程更快并且更可预测,提升易用性。</p>
-
-<ul>
-  <li>
-    <p>Unaligned Checkpoints,解决反压情况下 Checkpoint 做不出来的问题,从 Flink 1.12.2 版本开始可用:
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-76%3A+Unaligned+Checkpoints";>FLIP-76</a></p>
-  </li>
-  <li>
-    <p>Log-based Checkpoints, 可以做高频增量 Checkpoints,加快 checkpoint:
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-158%3A+Generalized+incremental+checkpoints";>FLIP-158</a></p>
-  </li>
-</ul>
-
-<h2 id="section-3">大规模批作业</h2>
-
-<p>Flink 社区也在致力于简化大规模批作业(并行度量级在10,000左右)的部署运行,所需的配置调整更少并使之有更好的性能。</p>
+<p>The community is continuously working on improving checkpointing and 
recovery speed.
+Checkpoints and recovery are stable and have been a reliable workhorse for 
years. We are still
+trying to make it faster, more predictable, and to remove some confusions and 
inflexibility in some areas.</p>
 
 <ul>
-  <li>
-    <p>为批处理引入更具扩展性的 batch shuffle。Batch shuffle 的第一部分已经合并入社区代码,
-  剩下的部分可以使内存占用量(JVM直接内存)更可预测,请参阅
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-148%3A+Introduce+Sort-Merge+Based+Blocking+Shuffle+to+Flink";>FLIP-148</a></p>
-
-    <ul>
-      <li><a 
href="https://issues.apache.org/jira/browse/FLINK-20740";>FLINK-20740</a></li>
-      <li><a 
href="https://issues.apache.org/jira/browse/FLINK-19938";>FLINK-19938</a></li>
-    </ul>
-  </li>
-  <li>
-    <p>更快调度高并发作业:<a 
href="https://issues.apache.org/jira/browse/FLINK-21110";>FLINK-21110</a></p>
-  </li>
+  <li><a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-183%3A+Dynamic+buffer+size+adjustment">FLIP-183</a>
+targets the size of checkpoints by debloating the network buffers. A first
+beta is already available.</li>
+  <li>With <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-151%3A+Incremental+snapshots+for+heap-based+state+backend">FLIP-151</a>
+there is an ongoing effort to implement incremental snapshots for the
+heap-based state backend.</li>
 </ul>
 
 <hr />
 
-<h1 id="python-apis">Python APIs</h1>
+<h1 id="apache-flink-as-part-of-an-ever-evolving-data-ecosystem">Apache Flink 
as part of an ever evolving data ecosystem</h1>
 
-<p>Python DataStream API 对状态访问的支持:
-<a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-153%3A+Support+state+access+in+Python+DataStream+API";>FLIP-153</a></p>
-
-<hr />
-
-<h1 id="section-4">文档</h1>
-
-<p>我们也正在简化文档结构,以方便更直观的导航和阅读</p>
+<p>There is almost no use case in which Apache Flink is used on its own. It
+has established itself as part of many data-related reference architectures;
+in fact, you’ll find the squirrel logo in several of them. The community has
+added a lot of connectors and formats. With the already mentioned
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-27%3A+Refactor+Source+Interface">FLIP-27</a> and
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-143%3A+Unified+Sink+API">FLIP-143</a>,
+a new default for connectors has been established.</p>
 
 <ul>
-  <li>Flink 文档迁移(Jekyll to Hugo):
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-157+Migrate+Flink+Documentation+from+Jekyll+to+Hugo";>FLIP-157</a></li>
-  <li>文档重构: <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-42%3A+Rework+Flink+Documentation";>FLIP-42</a></li>
-  <li>SQL 文档: <a 
href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=127405685";>FLIP-60</a></li>
+  <li>There are efforts to revise the formats API with
+<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-219%3A+Revised+Format+API">FLIP-219</a>.</li>
+  <li>There is ongoing work on new connectors
+(e.g., <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=177045634">Pinot</a>).</li>
+  <li>Connectors will be hosted in an external repository going forward; see the
+<a href="https://lists.apache.org/thread/8k1xonqt7hn0xldbky1cxfx3fzh6sj7h">ML thread</a>.</li>
 </ul>
 
 <hr />
 
-<h1 id="section-5">操作工具</h1>
+<h1 id="documentation">Documentation</h1>
+
+<p>There are various dedicated efforts to simplify the maintenance and 
structure (more intuitive navigation/reading)
+of the documentation.</p>
 
 <ul>
-  <li>允许使用 savepoint 来切换后端状态存储(state backends): <a 
href="https://issues.apache.org/jira/browse/FLINK-20976";>FLINK-20976</a></li>
-  <li>支持 savepoint 的其他的一些属性,例如增量 savepoint 等:
-  <a 
href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-47%3A+Checkpoints+vs.+Savepoints";>FLIP-47</a></li>
+  <li>Docs Tech Stack: <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-157+Migrate+Flink+Documentation+from+Jekyll+to+Hugo">FLIP-157</a></li>
+  <li>General Docs Structure: <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-42%3A+Rework+Flink+Documentation">FLIP-42</a></li>
+  <li>SQL Docs: <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=127405685">FLIP-60</a></li>
 </ul>
 
 <hr />
 
 <h1 id="stateful-functions">Stateful Functions</h1>
 
-<p>Stateful Functions 子项目有其单独的规划路线图,请参考 <a 
href="https://statefun.io/";>statefun.io</a> 。</p>
+<p>The Stateful Functions subproject has its own roadmap published under <a 
href="https://statefun.io/";>statefun.io</a>.</p>
 
 
   </div>
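
The unified FLIP-27 sources referenced throughout the roadmap are already
usable today. A minimal sketch of wiring one up, assuming the
flink-connector-kafka artifact is on the classpath; broker address, topic, and
group id are placeholders:

    import org.apache.flink.api.common.eventtime.WatermarkStrategy;
    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.connector.kafka.source.KafkaSource;
    import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class Flip27SourceSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // One source implementation serves bounded and unbounded reads.
            KafkaSource<String> source = KafkaSource.<String>builder()
                    .setBootstrapServers("localhost:9092")              // placeholder
                    .setTopics("input-topic")                           // placeholder
                    .setGroupId("roadmap-example")                      // placeholder
                    .setStartingOffsets(OffsetsInitializer.earliest())
                    .setValueOnlyDeserializer(new SimpleStringSchema())
                    .build();

            env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source")
               .print();

            env.execute("FLIP-27 source sketch");
        }
    }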
