zentol closed pull request #6988: [FLINK-10691][e2e] Remove dependency on hadoop for StreamSQL E2E test
URL: https://github.com/apache/flink/pull/6988
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/flink-end-to-end-tests/flink-stream-sql-test/pom.xml b/flink-end-to-end-tests/flink-stream-sql-test/pom.xml
index bae46d021c1..24f7a99f8f4 100644
--- a/flink-end-to-end-tests/flink-stream-sql-test/pom.xml
+++ b/flink-end-to-end-tests/flink-stream-sql-test/pom.xml
@@ -47,11 +47,6 @@
                        <version>${project.version}</version>
                        <scope>provided</scope>
                </dependency>
-               <dependency>
-                       <groupId>org.apache.flink</groupId>
-                       <artifactId>flink-connector-filesystem_${scala.binary.version}</artifactId>
-                       <version>${project.version}</version>
-               </dependency>
        </dependencies>
 
        <build>
diff --git a/flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java b/flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java
index bc07d618e2e..0ded7f5b48e 100644
--- a/flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java
+++ b/flink-end-to-end-tests/flink-stream-sql-test/src/main/java/org/apache/flink/sql/tests/StreamSQLTestProgram.java
@@ -20,18 +20,23 @@
 
 import org.apache.flink.api.common.functions.MapFunction;
 import org.apache.flink.api.common.restartstrategy.RestartStrategies;
+import org.apache.flink.api.common.serialization.Encoder;
 import org.apache.flink.api.common.time.Time;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeinfo.Types;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.api.java.utils.ParameterTool;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
 import org.apache.flink.streaming.api.TimeCharacteristic;
 import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
+import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
-import org.apache.flink.streaming.connectors.fs.bucketing.BasePathBucketer;
-import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
 import org.apache.flink.table.api.Table;
 import org.apache.flink.table.api.TableEnvironment;
 import org.apache.flink.table.api.TableSchema;
@@ -44,6 +49,7 @@
 import org.apache.flink.table.sources.wmstrategies.BoundedOutOfOrderTimestamps;
 import org.apache.flink.types.Row;
 
+import java.io.PrintStream;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -139,9 +145,14 @@ public static void main(String[] args) throws Exception {
                DataStream<Row> resultStream =
                        tEnv.toAppendStream(result, Types.ROW(Types.INT, Types.SQL_TIMESTAMP));
 
-               // define bucketing sink to emit the result
-               BucketingSink<Row> sink = new BucketingSink<Row>(outputPath)
-                       .setBucketer(new BasePathBucketer<>());
+               final StreamingFileSink<Row> sink = StreamingFileSink
+                       .forRowFormat(new Path(outputPath), (Encoder<Row>) (element, stream) -> {
+                               PrintStream out = new PrintStream(stream);
+                               out.println(element.toString());
+                       })
+                       .withBucketAssigner(new KeyBucketAssigner())
+                       .withRollingPolicy(OnCheckpointRollingPolicy.build())
+                       .build();
 
                resultStream
                        // inject a KillMapper that forwards all records but terminates the first execution attempt
@@ -152,6 +163,24 @@ public static void main(String[] args) throws Exception {
                sEnv.execute();
        }
 
+       /**
+        * Use first field for buckets.
+        */
+       public static final class KeyBucketAssigner implements BucketAssigner<Row, String> {
+
+               private static final long serialVersionUID = 987325769970523326L;
+
+               @Override
+               public String getBucketId(final Row element, final Context context) {
+                       return String.valueOf(element.getField(0));
+               }
+
+               @Override
+               public SimpleVersionedSerializer<String> getSerializer() {
+                       return SimpleVersionedStringSerializer.INSTANCE;
+               }
+       }
+
        /**
         * TableSource for generated data.
         */
diff --git a/flink-end-to-end-tests/test-scripts/test_streaming_sql.sh b/flink-end-to-end-tests/test-scripts/test_streaming_sql.sh
index 0aa931ef2d3..3a0dfcb1b8d 100755
--- a/flink-end-to-end-tests/test-scripts/test_streaming_sql.sh
+++ b/flink-end-to-end-tests/test-scripts/test_streaming_sql.sh
@@ -50,7 +50,7 @@ trap sql_cleanup INT
 trap sql_cleanup EXIT
 
 # collect results from files
-cat $TEST_DATA_DIR/out/result/part-0-0 $TEST_DATA_DIR/out/result/_part-0-1.pending > $TEST_DATA_DIR/out/result-complete
+cat $TEST_DATA_DIR/out/result/20/.part-* $TEST_DATA_DIR/out/result/20/part-* | sort > $TEST_DATA_DIR/out/result-complete
 
 # check result:
 # 20,1970-01-01 00:00:00.0
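
Note on the new sink's semantics (context for the script change above): with
OnCheckpointRollingPolicy, StreamingFileSink finalizes part files only when a
checkpoint completes, so in-progress output lives in hidden ".part-*" files
until then; that is why the test script now collects both ".part-*" and
"part-*" from the per-key bucket directory "20". A minimal, self-contained
sketch of wiring such a sink into a job follows; the output path, checkpoint
interval, and sample input are illustrative assumptions, not part of this PR:

import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;

public class StreamingFileSinkSketch {
        public static void main(String[] args) throws Exception {
                StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
                // OnCheckpointRollingPolicy rolls (finalizes) part files only on completed
                // checkpoints, so checkpointing must be enabled for output to become visible.
                env.enableCheckpointing(5000L); // assumed 5s interval

                DataStream<String> lines = env.fromElements("a", "b", "c"); // sample input

                StreamingFileSink<String> sink = StreamingFileSink
                        .forRowFormat(new Path("/tmp/out"), new SimpleStringEncoder<String>())
                        .withRollingPolicy(OnCheckpointRollingPolicy.build())
                        .build();

                lines.addSink(sink);
                env.execute("StreamingFileSink sketch");
        }
}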


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
