This is an automated email from the ASF dual-hosted git repository.

sewen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 92d5033acfed337dfadf0f9def11164c39d6ceca
Author: Stephan Ewen <[email protected]>
AuthorDate: Thu Nov 5 11:57:01 2020 +0100

    [hotfix] Move FileBufferReaderITCase from 'flink-tests' to 'flink-runtime'
    
    This helps fix some complications with access to test resources, which failed
on the Azure Pipelines setup.
---
 .../io/network/partition}/FileBufferReaderITCase.java   | 17 +++--------------
 1 file changed, 3 insertions(+), 14 deletions(-)

diff --git 
a/flink-tests/src/test/java/org/apache/flink/test/runtime/FileBufferReaderITCase.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/FileBufferReaderITCase.java
similarity index 90%
rename from 
flink-tests/src/test/java/org/apache/flink/test/runtime/FileBufferReaderITCase.java
rename to 
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/FileBufferReaderITCase.java
index 9c058f2..0664168 100644
--- 
a/flink-tests/src/test/java/org/apache/flink/test/runtime/FileBufferReaderITCase.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/FileBufferReaderITCase.java
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-package org.apache.flink.test.runtime;
+package org.apache.flink.runtime.io.network.partition;
 
-import org.apache.flink.api.common.JobID;
-import org.apache.flink.client.program.MiniClusterClient;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.configuration.MemorySize;
 import org.apache.flink.configuration.NettyShuffleEnvironmentOptions;
@@ -29,15 +27,12 @@ import org.apache.flink.runtime.execution.Environment;
 import org.apache.flink.runtime.io.network.api.reader.RecordReader;
 import org.apache.flink.runtime.io.network.api.writer.RecordWriter;
 import org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder;
-import 
org.apache.flink.runtime.io.network.partition.BoundedBlockingSubpartitionType;
-import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
 import org.apache.flink.runtime.jobgraph.DistributionPattern;
 import org.apache.flink.runtime.jobgraph.JobGraph;
 import org.apache.flink.runtime.jobgraph.JobVertex;
 import org.apache.flink.runtime.jobgraph.ScheduleMode;
 import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
 import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
-import org.apache.flink.runtime.jobmaster.JobResult;
 import org.apache.flink.runtime.minicluster.MiniCluster;
 import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;
 import org.apache.flink.runtime.net.SSLUtilsTest;
@@ -54,7 +49,6 @@ import org.junit.runners.Parameterized;
 
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.CompletableFuture;
 
 import static org.hamcrest.Matchers.is;
 import static org.junit.Assert.assertThat;
@@ -123,15 +117,10 @@ public class FileBufferReaderITCase extends TestLogger {
                try (final MiniCluster miniCluster = new 
MiniCluster(miniClusterConfiguration)) {
                        miniCluster.start();
 
-                       final MiniClusterClient client = new 
MiniClusterClient(configuration, miniCluster);
                        final JobGraph jobGraph = createJobGraph();
-                       // wait for the submission to succeed
-                       final JobID jobID = client.submitJob(jobGraph).get();
 
-                       final CompletableFuture<JobResult> resultFuture = 
client.requestJobResult(jobID);
-                       final JobResult jobResult = resultFuture.get();
-
-                       
assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
+                       // the job needs to complete without throwing an 
exception
+                       miniCluster.executeJobBlocking(jobGraph);
                }
        }
 

Reply via email to