[4/4] flink git commit: [FLINK-3366] Rename @Experimental annotation to @PublicEvolving

2016-02-10 Thread fhueske
[FLINK-3366] Rename @Experimental annotation to @PublicEvolving

This closes #1599


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/572855da
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/572855da
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/572855da

Branch: refs/heads/master
Commit: 572855daad452eab169bc2ca27f9f1e4476df656
Parents: 59b237b
Author: Fabian Hueske 
Authored: Mon Feb 8 14:14:01 2016 +0100
Committer: Fabian Hueske 
Committed: Wed Feb 10 11:51:26 2016 +0100

--
 .../apache/flink/annotation/Experimental.java   | 35 ---
 .../apache/flink/annotation/PublicEvolving.java | 40 
 .../flink/api/common/ExecutionConfig.java   | 16 ++---
 .../flink/api/common/JobExecutionResult.java|  4 +-
 .../functions/IterationRuntimeContext.java  |  4 +-
 .../api/common/functions/RuntimeContext.java| 22 +++
 .../util/AbstractRuntimeUDFContext.java | 12 ++--
 .../common/io/statistics/BaseStatistics.java| 14 ++---
 .../api/common/typeinfo/BasicArrayTypeInfo.java | 22 +++
 .../api/common/typeinfo/BasicTypeInfo.java  | 22 +++
 .../api/common/typeinfo/NothingTypeInfo.java| 16 ++---
 .../common/typeinfo/PrimitiveArrayTypeInfo.java | 24 +++
 .../api/common/typeinfo/TypeInformation.java| 20 +++---
 .../api/common/typeutils/CompositeType.java | 36 +--
 .../flink/api/java/typeutils/AvroTypeInfo.java  |  4 +-
 .../api/java/typeutils/EitherTypeInfo.java  | 18 +++---
 .../flink/api/java/typeutils/EnumTypeInfo.java  | 20 +++---
 .../api/java/typeutils/GenericTypeInfo.java | 20 +++---
 .../api/java/typeutils/ObjectArrayTypeInfo.java | 22 +++
 .../flink/api/java/typeutils/PojoTypeInfo.java  | 28 -
 .../flink/api/java/typeutils/TupleTypeInfo.java | 16 ++---
 .../flink/api/java/typeutils/TypeExtractor.java | 66 ++--
 .../flink/api/java/typeutils/ValueTypeInfo.java | 24 +++
 .../api/java/typeutils/WritableTypeInfo.java| 22 +++
 .../java/org/apache/flink/api/java/DataSet.java |  6 +-
 .../flink/api/java/ExecutionEnvironment.java| 32 +-
 .../apache/flink/api/java/LocalEnvironment.java |  4 +-
 .../flink/api/java/RemoteEnvironment.java   |  4 +-
 .../flink/api/java/functions/FirstReducer.java  |  1 +
 .../api/java/functions/FunctionAnnotation.java  | 10 +--
 .../org/apache/flink/api/java/io/CsvReader.java |  4 +-
 .../flink/api/java/operators/CrossOperator.java |  4 +-
 .../flink/api/java/operators/DataSink.java  |  6 +-
 .../flink/api/java/operators/DataSource.java|  4 +-
 .../api/java/operators/DeltaIteration.java  |  6 +-
 .../api/java/operators/IterativeDataSet.java|  8 +--
 .../flink/api/java/operators/JoinOperator.java  |  4 +-
 .../api/java/operators/ProjectOperator.java |  4 +-
 .../flink/api/java/utils/DataSetUtils.java  |  4 +-
 .../flink/api/java/utils/ParameterTool.java |  4 +-
 .../org/apache/flink/api/scala/DataSet.scala|  8 +--
 .../flink/api/scala/ExecutionEnvironment.scala  | 34 +-
 .../api/scala/typeutils/CaseClassTypeInfo.scala | 16 ++---
 .../api/scala/typeutils/EitherTypeInfo.scala| 18 +++---
 .../api/scala/typeutils/EnumValueTypeInfo.scala | 20 +++---
 .../api/scala/typeutils/OptionTypeInfo.scala| 18 +++---
 .../scala/typeutils/ScalaNothingTypeInfo.scala  | 16 ++---
 .../scala/typeutils/TraversableTypeInfo.scala   | 18 +++---
 .../flink/api/scala/typeutils/TryTypeInfo.scala | 18 +++---
 .../api/scala/typeutils/UnitTypeInfo.scala  | 16 ++---
 .../apache/flink/api/scala/utils/package.scala  |  5 +-
 .../api/datastream/AllWindowedStream.java   |  8 +--
 .../api/datastream/CoGroupedStreams.java|  8 +--
 .../api/datastream/ConnectedStreams.java|  4 +-
 .../streaming/api/datastream/DataStream.java| 46 +++---
 .../api/datastream/DataStreamSink.java  |  6 +-
 .../api/datastream/IterativeStream.java |  4 +-
 .../streaming/api/datastream/JoinedStreams.java | 10 +--
 .../streaming/api/datastream/KeyedStream.java   |  6 +-
 .../datastream/SingleOutputStreamOperator.java  | 20 +++---
 .../streaming/api/datastream/SplitStream.java   |  4 +-
 .../api/datastream/WindowedStream.java  |  8 +--
 .../api/environment/CheckpointConfig.java   |  6 +-
 .../environment/StreamExecutionEnvironment.java | 36 +--
 .../source/EventTimeSourceFunction.java |  4 +-
 .../api/functions/source/SourceFunction.java|  6 +-
 .../streaming/api/scala/AllWindowedStream.scala |  6 +-
 .../streaming/api/scala/CoGroupedStreams.scala  | 10 +--
 .../flink/streaming/api/scala/DataStream.scala  | 57 -
 .../streaming/api/scala/JoinedStreams.scala |  8 +--
 .../flink/streaming/api/scala/KeyedStream.scala |  4 +-
 .../api/scala/StreamExecutionEnvironment.scala  | 28 -
 .../streaming/api/scala/WindowedStrea

[3/4] flink git commit: [FLINK-3366] Rename @Experimental annotation to @PublicEvolving

2016-02-10 Thread fhueske
http://git-wip-us.apache.org/repos/asf/flink/blob/572855da/flink-java/src/main/java/org/apache/flink/api/java/RemoteEnvironment.java
--
diff --git 
a/flink-java/src/main/java/org/apache/flink/api/java/RemoteEnvironment.java 
b/flink-java/src/main/java/org/apache/flink/api/java/RemoteEnvironment.java
index 5dd2988..223ebee 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/RemoteEnvironment.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/RemoteEnvironment.java
@@ -18,7 +18,7 @@
 
 package org.apache.flink.api.java;
 
-import org.apache.flink.annotation.Experimental;
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.Public;
 import org.apache.flink.api.common.InvalidProgramException;
 import org.apache.flink.api.common.JobExecutionResult;
@@ -183,7 +183,7 @@ public class RemoteEnvironment extends ExecutionEnvironment 
{
}
 
@Override
-   @Experimental
+   @PublicEvolving
public void startNewSession() throws Exception {
dispose();
jobID = JobID.generate();

http://git-wip-us.apache.org/repos/asf/flink/blob/572855da/flink-java/src/main/java/org/apache/flink/api/java/functions/FirstReducer.java
--
diff --git 
a/flink-java/src/main/java/org/apache/flink/api/java/functions/FirstReducer.java
 
b/flink-java/src/main/java/org/apache/flink/api/java/functions/FirstReducer.java
index 2eda077..fdd114e 100644
--- 
a/flink-java/src/main/java/org/apache/flink/api/java/functions/FirstReducer.java
+++ 
b/flink-java/src/main/java/org/apache/flink/api/java/functions/FirstReducer.java
@@ -17,6 +17,7 @@
  */
 
 package org.apache.flink.api.java.functions;
+
 import org.apache.flink.api.common.functions.GroupCombineFunction;
 import org.apache.flink.api.common.functions.GroupReduceFunction;
 import org.apache.flink.util.Collector;

http://git-wip-us.apache.org/repos/asf/flink/blob/572855da/flink-java/src/main/java/org/apache/flink/api/java/functions/FunctionAnnotation.java
--
diff --git 
a/flink-java/src/main/java/org/apache/flink/api/java/functions/FunctionAnnotation.java
 
b/flink-java/src/main/java/org/apache/flink/api/java/functions/FunctionAnnotation.java
index dd00c31..0ce518e 100644
--- 
a/flink-java/src/main/java/org/apache/flink/api/java/functions/FunctionAnnotation.java
+++ 
b/flink-java/src/main/java/org/apache/flink/api/java/functions/FunctionAnnotation.java
@@ -26,7 +26,7 @@ import java.lang.annotation.Retention;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.flink.annotation.Experimental;
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.Public;
 import org.apache.flink.api.common.InvalidProgramException;
@@ -310,7 +310,7 @@ public class FunctionAnnotation {
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
-   @Experimental
+   @PublicEvolving
public @interface ReadFields {
String[] value();
}
@@ -341,7 +341,7 @@ public class FunctionAnnotation {
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
-   @Experimental
+   @PublicEvolving
public @interface ReadFieldsFirst {
String[] value();
}
@@ -372,7 +372,7 @@ public class FunctionAnnotation {
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
-   @Experimental
+   @PublicEvolving
public @interface ReadFieldsSecond {
String[] value();
}
@@ -389,7 +389,7 @@ public class FunctionAnnotation {
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
-   @Experimental
+   @PublicEvolving
public @interface SkipCodeAnalysis {
}
 

http://git-wip-us.apache.org/repos/asf/flink/blob/572855da/flink-java/src/main/java/org/apache/flink/api/java/io/CsvReader.java
--
diff --git 
a/flink-java/src/main/java/org/apache/flink/api/java/io/CsvReader.java 
b/flink-java/src/main/java/org/apache/flink/api/java/io/CsvReader.java
index 9c6621d..3d656a4 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/io/CsvReader.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/io/CsvReader.java
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 
 import org.apache.flink.annotation.Public;
-import org.apache.flink.annotation.Experimental;
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.Utils;
 import org.apache.flink.api.java.operators.DataSource;
@@ -110,7 +110,7 @@ public class

[2/4] flink git commit: [FLINK-3366] Rename @Experimental annotation to @PublicEvolving

2016-02-10 Thread fhueske
http://git-wip-us.apache.org/repos/asf/flink/blob/572855da/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
--
diff --git 
a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
 
b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
index 736c41b..04c1980 100644
--- 
a/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
+++ 
b/flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
@@ -18,7 +18,7 @@
 
 package org.apache.flink.streaming.api.scala
 
-import org.apache.flink.annotation.{Experimental, Public}
+import org.apache.flink.annotation.{PublicEvolving, Public}
 import org.apache.flink.api.common.functions.{FilterFunction, FlatMapFunction, 
MapFunction, Partitioner}
 import org.apache.flink.api.common.io.OutputFormat
 import org.apache.flink.api.common.typeinfo.TypeInformation
@@ -50,6 +50,7 @@ class DataStream[T](stream: JavaStream[T]) {
 
   /**
 * Returns the [[StreamExecutionEnvironment]] associated with the current 
[[DataStream]].
+ *
 * @return associated execution environment
 */
   def getExecutionEnvironment: StreamExecutionEnvironment =
@@ -60,7 +61,7 @@ class DataStream[T](stream: JavaStream[T]) {
*
* @return ID of the DataStream
*/
-  @Experimental
+  @PublicEvolving
   def getId = stream.getId
 
   /**
@@ -128,7 +129,7 @@ class DataStream[T](stream: JavaStream[T]) {
 * @param uid The unique user-specified ID of this transformation.
 * @return The operator with the specified ID.
 */
-  @Experimental
+  @PublicEvolving
   def uid(uid: String) : DataStream[T] = javaStream match {
 case stream : SingleOutputStreamOperator[T,_] => stream.uid(uid)
 case _ => throw new UnsupportedOperationException("Only supported for 
operators.")
@@ -142,7 +143,7 @@ class DataStream[T](stream: JavaStream[T]) {
* however it is not advised for performance considerations.
*
*/
-  @Experimental
+  @PublicEvolving
   def disableChaining(): DataStream[T] = {
 stream match {
   case ds: SingleOutputStreamOperator[_, _] => ds.disableChaining();
@@ -158,7 +159,7 @@ class DataStream[T](stream: JavaStream[T]) {
* previous tasks even if possible.
*
*/
-  @Experimental
+  @PublicEvolving
   def startNewChain(): DataStream[T] = {
 stream match {
   case ds: SingleOutputStreamOperator[_, _] => ds.startNewChain();
@@ -175,7 +176,7 @@ class DataStream[T](stream: JavaStream[T]) {
* All subsequent operators are assigned to the default resource group.
*
*/
-  @Experimental
+  @PublicEvolving
   def isolateResources(): DataStream[T] = {
 stream match {
   case ds: SingleOutputStreamOperator[_, _] => ds.isolateResources();
@@ -196,7 +197,7 @@ class DataStream[T](stream: JavaStream[T]) {
* degree of parallelism for the operators must be decreased from the
* default.
*/
-  @Experimental
+  @PublicEvolving
   def startNewResourceGroup(): DataStream[T] = {
 stream match {
   case ds: SingleOutputStreamOperator[_, _] => ds.startNewResourceGroup();
@@ -345,14 +346,14 @@ class DataStream[T](stream: JavaStream[T]) {
* the first instance of the next processing operator. Use this setting with 
care
* since it might cause a serious performance bottleneck in the application.
*/
-  @Experimental
+  @PublicEvolving
   def global: DataStream[T] = stream.global()
 
   /**
* Sets the partitioning of the DataStream so that the output tuples
* are shuffled to the next component.
*/
-  @Experimental
+  @PublicEvolving
   def shuffle: DataStream[T] = stream.shuffle()
 
   /**
@@ -385,7 +386,7 @@ class DataStream[T](stream: JavaStream[T]) {
* In cases where the different parallelisms are not multiples of each other 
one or several
* downstream operations will have a differing number of inputs from 
upstream operations.
*/
-  @Experimental
+  @PublicEvolving
   def rescale: DataStream[T] = stream.rescale()
 
   /**
@@ -408,7 +409,7 @@ class DataStream[T](stream: JavaStream[T]) {
* the keepPartitioning flag to true
*
*/
-  @Experimental
+  @PublicEvolving
   def iterate[R](stepFunction: DataStream[T] => (DataStream[T], DataStream[R]),
 maxWaitTimeMillis:Long = 0,
 keepPartitioning: Boolean = false) : DataStream[R] = {
@@ -438,7 +439,7 @@ class DataStream[T](stream: JavaStream[T]) {
* to 0 then the iteration sources will run indefinitely, so the job must be 
killed to stop.
*
*/
-  @Experimental
+  @PublicEvolving
   def iterate[R, F: TypeInformation: ClassTag](stepFunction: 
ConnectedStreams[T, F] =>
 (DataStream[F], DataStream[R]), maxWaitTimeMillis:Long): DataStream[R] = {
 val feedbackType: TypeInformation[F] = implicitly[TypeInformation[F]]
@@ -625,7 +626,7 @@ class Dat

[1/4] flink git commit: [FLINK-3234] [dataSet] Add KeySelector support to sortPartition operation.

2016-02-10 Thread fhueske
Repository: flink
Updated Branches:
  refs/heads/master 59b237b5d -> 0a63797a6


[FLINK-3234] [dataSet] Add KeySelector support to sortPartition operation.

This closes #1585


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/0a63797a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/0a63797a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/0a63797a

Branch: refs/heads/master
Commit: 0a63797a6a5418b2363bca25bd77c33c217ff257
Parents: 572855d
Author: Chiwan Park 
Authored: Thu Feb 4 20:46:10 2016 +0900
Committer: Fabian Hueske 
Committed: Wed Feb 10 11:51:26 2016 +0100

--
 .../java/org/apache/flink/api/java/DataSet.java |  18 ++
 .../java/operators/SortPartitionOperator.java   | 174 +--
 .../api/java/operator/SortPartitionTest.java|  82 +
 .../org/apache/flink/api/scala/DataSet.scala|  25 +++
 .../api/scala/PartitionSortedDataSet.scala  |  22 ++-
 .../javaApiOperators/SortPartitionITCase.java   |  61 +++
 .../scala/operators/SortPartitionITCase.scala   |  59 +++
 7 files changed, 385 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/0a63797a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java
--
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java 
b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java
index bfb97f4..c315920 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java
@@ -1381,6 +1381,24 @@ public abstract class DataSet {
return new SortPartitionOperator<>(this, field, order, 
Utils.getCallLocationName());
}
 
+   /**
+* Locally sorts the partitions of the DataSet on the extracted key in 
the specified order.
+* The DataSet can be sorted on multiple values by returning a tuple 
from the KeySelector.
+*
+* Note that no additional sort keys can be appended to KeySelector 
sort keys. To sort
+* the partitions by multiple values using KeySelector, the KeySelector 
must return a tuple
+* consisting of the values.
+*
+* @param keyExtractor The KeySelector function which extracts the key 
values from the DataSet
+* on which the DataSet is sorted.
+* @param order The order in which the DataSet is sorted.
+* @return The DataSet with sorted local partitions.
+*/
+   public  SortPartitionOperator sortPartition(KeySelector 
keyExtractor, Order order) {
+   final TypeInformation keyType = 
TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
+   return new SortPartitionOperator<>(this, new 
Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType), order, 
Utils.getCallLocationName());
+   }
+
// 

//  Top-K
// 


http://git-wip-us.apache.org/repos/asf/flink/blob/0a63797a/flink-java/src/main/java/org/apache/flink/api/java/operators/SortPartitionOperator.java
--
diff --git 
a/flink-java/src/main/java/org/apache/flink/api/java/operators/SortPartitionOperator.java
 
b/flink-java/src/main/java/org/apache/flink/api/java/operators/SortPartitionOperator.java
index 354a0cd..7f30a30 100644
--- 
a/flink-java/src/main/java/org/apache/flink/api/java/operators/SortPartitionOperator.java
+++ 
b/flink-java/src/main/java/org/apache/flink/api/java/operators/SortPartitionOperator.java
@@ -26,9 +26,13 @@ import org.apache.flink.api.common.operators.Order;
 import org.apache.flink.api.common.operators.Ordering;
 import org.apache.flink.api.common.operators.UnaryOperatorInformation;
 import org.apache.flink.api.common.operators.base.SortPartitionOperatorBase;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.DataSet;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.api.java.tuple.Tuple2;
 
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.List;
 
 /**
  * This operator represents a DataSet with locally sorted partitions.
@@ -38,27 +42,58 @@ import java.util.Arrays;
 @Public
 public class SortPartitionOperator extends SingleInputOperator> {
 
-   private int[] sortKeyPositions;
+   private List> keys;
 
-   private Order[] sortOrders;
+   private List orders;
 
private final String sortLocationName;
 
+   private boolean useKeySelector;
 
-

flink git commit: [docs] Fixed typo in Storm Compatibility page

2016-02-10 Thread mjsax
Repository: flink
Updated Branches:
  refs/heads/master 0a63797a6 -> aeee6efd4


[docs] Fixed typo in Storm Compatibility page

This closes #1618


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/aeee6efd
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/aeee6efd
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/aeee6efd

Branch: refs/heads/master
Commit: aeee6efd451dcbdda37ed05779bda74291079ed5
Parents: 0a63797
Author: Georgios Andrianakis 
Authored: Wed Feb 10 14:30:46 2016 +0200
Committer: mjsax 
Committed: Wed Feb 10 14:51:14 2016 +0100

--
 docs/apis/streaming/storm_compatibility.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/aeee6efd/docs/apis/streaming/storm_compatibility.md
--
diff --git a/docs/apis/streaming/storm_compatibility.md 
b/docs/apis/streaming/storm_compatibility.md
index d646040..4d5715c 100644
--- a/docs/apis/streaming/storm_compatibility.md
+++ b/docs/apis/streaming/storm_compatibility.md
@@ -228,8 +228,8 @@ DataStream> multiStream = ...
 SplitStream> splitStream = multiStream.split(new 
StormStreamSelector());
 
 // remove SplitStreamType using SplitStreamMapper to get data stream of type 
SomeType
-DataStream s1 = splitStream.select("s1").map(new 
SplitStreamMapper()).returns(SomeType.classs);
-DataStream s2 = splitStream.select("s2").map(new 
SplitStreamMapper()).returns(SomeType.classs);
+DataStream s1 = splitStream.select("s1").map(new 
SplitStreamMapper()).returns(SomeType.class);
+DataStream s2 = splitStream.select("s2").map(new 
SplitStreamMapper()).returns(SomeType.class);
 
 // do further processing on s1 and s2
 [...]



flink git commit: [FLINK-3373] [build] Shade away Hadoop's HTTP Components dependency

2016-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master aeee6efd4 -> 8ccd7544e


[FLINK-3373] [build] Shade away Hadoop's HTTP Components dependency

This closes #1615


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8ccd7544
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8ccd7544
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8ccd7544

Branch: refs/heads/master
Commit: 8ccd7544edb25e82cc8a898809cc7c8bb7893620
Parents: aeee6ef
Author: Stephan Ewen 
Authored: Tue Feb 9 21:18:43 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:01:22 2016 +0100

--
 flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml | 15 +++
 flink-shaded-hadoop/pom.xml  | 13 +
 pom.xml  | 12 
 3 files changed, 28 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8ccd7544/flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml
--
diff --git a/flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml 
b/flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml
index b5839d9..5eb8043 100644
--- a/flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml
+++ b/flink-shaded-hadoop/flink-shaded-hadoop2/pom.xml
@@ -652,4 +652,19 @@ under the License.


 
+   
+   
+   
+   org.apache.httpcomponents
+   httpcore
+   4.2.5
+   
+   
+   
+   org.apache.httpcomponents
+   httpclient
+   4.2.6
+   
+   
+   
 

http://git-wip-us.apache.org/repos/asf/flink/blob/8ccd7544/flink-shaded-hadoop/pom.xml
--
diff --git a/flink-shaded-hadoop/pom.xml b/flink-shaded-hadoop/pom.xml
index 7d54ef9..d5a8529 100644
--- a/flink-shaded-hadoop/pom.xml
+++ b/flink-shaded-hadoop/pom.xml
@@ -111,6 +111,11 @@ under the License.

io.netty:netty:*

org.apache.curator:*

org.apache.hadoop:*
+
+   
+   
net.java.dev.jets3t:jets3t
+   
org.apache.httpcomponents:*
+   
commons-httpclient:commons-httpclient



@@ -133,6 +138,14 @@ under the License.

org.apache.curator

org.apache.flink.hadoop.shaded.org.apache.curator

+   
+   
org.apache.http
+   
org.apache.flink.hadoop.shaded.org.apache.http
+   
+   
+   
org.apache.commons.httpclient
+   
org.apache.flink.hadoop.shaded.org.apache.commons.httpclient
+   




http://git-wip-us.apache.org/repos/asf/flink/blob/8ccd7544/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 0a64c4a..42ebf79 100644
--- a/pom.xml
+++ b/pom.xml
@@ -342,18 +342,6 @@ under the License.

 

-   org.apache.httpcomponents
-   httpcore
-   4.2.5
-   
-
-   
-   org.apache.httpcomponents
-   httpc

flink git commit: [FLINK-3372] Setting custom YARN application name is ignored

2016-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/release-0.10 2cd06181f -> 11bfe6f3e


[FLINK-3372] Setting custom YARN application name is ignored

This closes #1607


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/11bfe6f3
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/11bfe6f3
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/11bfe6f3

Branch: refs/heads/release-0.10
Commit: 11bfe6f3e9956101e76fbb3b3b8dccee93d81eb3
Parents: 2cd0618
Author: Nick Dimiduk 
Authored: Mon Feb 8 10:24:52 2016 -0800
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:20:49 2016 +0100

--
 .../src/main/java/org/apache/flink/client/CliFrontend.java | 3 +--
 .../main/java/org/apache/flink/client/FlinkYarnSessionCli.java | 6 ++
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/11bfe6f3/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
--
diff --git 
a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java 
b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
index 7c8d52e..d830361 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/CliFrontend.java
@@ -813,11 +813,10 @@ public class CliFrontend {
 
// user wants to run Flink in YARN cluster.
CommandLine commandLine = options.getCommandLine();
-   AbstractFlinkYarnClient flinkYarnClient = 
CliFrontendParser.getFlinkYarnSessionCli().createFlinkYarnClient(commandLine);
+   AbstractFlinkYarnClient flinkYarnClient = 
CliFrontendParser.getFlinkYarnSessionCli().createFlinkYarnClient(commandLine, 
programName);
if (flinkYarnClient == null) {
throw new RuntimeException("Unable to create 
Flink YARN Client. Check previous log messages");
}
-   flinkYarnClient.setName("Flink Application: " + 
programName);
 
// the number of slots available from YARN:
int yarnTmSlots = flinkYarnClient.getTaskManagerSlots();

http://git-wip-us.apache.org/repos/asf/flink/blob/11bfe6f3/flink-clients/src/main/java/org/apache/flink/client/FlinkYarnSessionCli.java
--
diff --git 
a/flink-clients/src/main/java/org/apache/flink/client/FlinkYarnSessionCli.java 
b/flink-clients/src/main/java/org/apache/flink/client/FlinkYarnSessionCli.java
index a9a20ae..ff11c19 100644
--- 
a/flink-clients/src/main/java/org/apache/flink/client/FlinkYarnSessionCli.java
+++ 
b/flink-clients/src/main/java/org/apache/flink/client/FlinkYarnSessionCli.java
@@ -103,6 +103,10 @@ public class FlinkYarnSessionCli {
}
 
public AbstractFlinkYarnClient createFlinkYarnClient(CommandLine cmd) {
+   return createFlinkYarnClient(cmd, null);
+   }
+
+   public AbstractFlinkYarnClient createFlinkYarnClient(CommandLine cmd, 
String programName) {
 
AbstractFlinkYarnClient flinkYarnClient = getFlinkYarnClient();
if (flinkYarnClient == null) {
@@ -225,6 +229,8 @@ public class FlinkYarnSessionCli {
}
if(cmd.hasOption(NAME.getOpt())) {

flinkYarnClient.setName(cmd.getOptionValue(NAME.getOpt()));
+   } else if (programName != null && !programName.isEmpty()) {
+   flinkYarnClient.setName("Flink Application: " + 
programName);
}
return flinkYarnClient;
}



[2/6] flink git commit: [FLINK-3350] [tests] Increase default test Akka ask and ZooKeeper timeouts

2016-02-10 Thread sewen
[FLINK-3350] [tests] Increase default test Akka ask and ZooKeeper timeouts


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/b8f40251
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/b8f40251
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/b8f40251

Branch: refs/heads/master
Commit: b8f40251c6c45379118254c21b0d553c2d3b8937
Parents: 9173825
Author: Ufuk Celebi 
Authored: Mon Feb 8 14:24:43 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:26:43 2016 +0100

--
 .../runtime/minicluster/FlinkMiniCluster.scala  | 20 ++--
 .../minicluster/LocalFlinkMiniCluster.scala |  2 ++
 .../runtime/testutils/ZooKeeperTestUtils.java   |  5 +++--
 .../runtime/testingUtils/TestingCluster.scala   |  2 ++
 .../kafka/KafkaTestEnvironmentImpl.java |  7 ---
 .../kafka/KafkaTestEnvironmentImpl.java |  7 ---
 ...ctTaskManagerProcessFailureRecoveryTest.java |  3 +++
 .../JobManagerCheckpointRecoveryITCase.java |  8 ++--
 .../recovery/ProcessFailureCancelingITCase.java |  2 +-
 9 files changed, 43 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/b8f40251/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/FlinkMiniCluster.scala
--
diff --git 
a/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/FlinkMiniCluster.scala
 
b/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/FlinkMiniCluster.scala
index 4cdda3f..0346d6d 100644
--- 
a/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/FlinkMiniCluster.scala
+++ 
b/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/FlinkMiniCluster.scala
@@ -42,7 +42,7 @@ import org.apache.flink.runtime.webmonitor.{WebMonitorUtils, 
WebMonitor}
 
 import org.slf4j.LoggerFactory
 
-import scala.concurrent.duration.FiniteDuration
+import scala.concurrent.duration.{Duration, FiniteDuration}
 import scala.concurrent._
 import scala.concurrent.forkjoin.ForkJoinPool
 
@@ -86,7 +86,7 @@ abstract class FlinkMiniCluster(
   
   implicit val executionContext = ExecutionContext.global
 
-  implicit val timeout = AkkaUtils.getTimeout(userConfiguration)
+  implicit val timeout = AkkaUtils.getTimeout(configuration)
 
   val recoveryMode = RecoveryMode.fromConfig(configuration)
 
@@ -188,6 +188,22 @@ abstract class FlinkMiniCluster(
 AkkaUtils.getAkkaConfig(configuration, Some((hostname, resolvedPort)))
   }
 
+  /**
+* Sets CI environment (Travis) specific config defaults.
+*/
+  def setDefaultCiConfig(config: Configuration) : Unit = {
+// 
https://docs.travis-ci.com/user/environment-variables#Default-Environment-Variables
+if (sys.env.contains("CI")) {
+  // Only set if nothing specified in config
+  if (config.getString(ConfigConstants.AKKA_ASK_TIMEOUT, null) == null) {
+val duration = Duration(ConfigConstants.DEFAULT_AKKA_ASK_TIMEOUT) * 10
+config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, 
s"${duration.toSeconds}s")
+
+LOG.info(s"Akka ask timeout set to ${duration.toSeconds}s")
+  }
+}
+  }
+
   // --
   //  Start/Stop Methods
   // --

http://git-wip-us.apache.org/repos/asf/flink/blob/b8f40251/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/LocalFlinkMiniCluster.scala
--
diff --git 
a/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/LocalFlinkMiniCluster.scala
 
b/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/LocalFlinkMiniCluster.scala
index 913aec0..c803429 100644
--- 
a/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/LocalFlinkMiniCluster.scala
+++ 
b/flink-runtime/src/main/scala/org/apache/flink/runtime/minicluster/LocalFlinkMiniCluster.scala
@@ -48,6 +48,8 @@ class LocalFlinkMiniCluster(
   override def generateConfiguration(userConfiguration: Configuration): 
Configuration = {
 val config = getDefaultConfig
 
+setDefaultCiConfig(config)
+
 config.addAll(userConfiguration)
 setMemory(config)
 initializeIOFormatClasses(config)

http://git-wip-us.apache.org/repos/asf/flink/blob/b8f40251/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/ZooKeeperTestUtils.java
index 6c33835..75569ec 100644
--- 
a/flink-runt

[5/6] flink git commit: [hotfix] [tests] Reset state to allow retry on failure

2016-02-10 Thread sewen
[hotfix] [tests] Reset state to allow retry on failure

This closes #1611


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/48b74546
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/48b74546
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/48b74546

Branch: refs/heads/master
Commit: 48b745466202ebbb68608930e13cb6ed4a35e6e7
Parents: 756cbaf
Author: Ufuk Celebi 
Authored: Tue Feb 9 12:45:41 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:27:41 2016 +0100

--
 .../JobManagerCheckpointRecoveryITCase.java  | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/48b74546/flink-tests/src/test/java/org/apache/flink/test/recovery/JobManagerCheckpointRecoveryITCase.java
--
diff --git 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/JobManagerCheckpointRecoveryITCase.java
 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/JobManagerCheckpointRecoveryITCase.java
index 59a05ff..ea30c58 100644
--- 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/JobManagerCheckpointRecoveryITCase.java
+++ 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/JobManagerCheckpointRecoveryITCase.java
@@ -116,15 +116,15 @@ public class JobManagerCheckpointRecoveryITCase extends 
TestLogger {
 
private static final int Parallelism = 8;
 
-   private static final CountDownLatch CompletedCheckpointsLatch = new 
CountDownLatch(2);
+   private static CountDownLatch CompletedCheckpointsLatch = new 
CountDownLatch(2);
 
-   private static final AtomicLongArray RecoveredStates = new 
AtomicLongArray(Parallelism);
+   private static AtomicLongArray RecoveredStates = new 
AtomicLongArray(Parallelism);
 
-   private static final CountDownLatch FinalCountLatch = new 
CountDownLatch(1);
+   private static CountDownLatch FinalCountLatch = new CountDownLatch(1);
 
-   private static final AtomicReference FinalCount = new 
AtomicReference<>();
+   private static AtomicReference FinalCount = new 
AtomicReference<>();
 
-   private static final long LastElement = -1;
+   private static long LastElement = -1;
 
/**
 * Simple checkpointed streaming sum.
@@ -156,7 +156,6 @@ public class JobManagerCheckpointRecoveryITCase extends 
TestLogger {
Configuration config = 
ZooKeeperTestUtils.createZooKeeperRecoveryModeConfig(ZooKeeper
.getConnectString(), 
FileStateBackendBasePath.getAbsoluteFile().toURI().toString());
config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 
Parallelism);
-   config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100 s");
 
ActorSystem testSystem = null;
JobManagerProcess[] jobManagerProcess = new 
JobManagerProcess[2];
@@ -248,6 +247,13 @@ public class JobManagerCheckpointRecoveryITCase extends 
TestLogger {
}
}
catch (Throwable t) {
+   // Reset all static state for test retries
+   CompletedCheckpointsLatch = new CountDownLatch(2);
+   RecoveredStates = new AtomicLongArray(Parallelism);
+   FinalCountLatch = new CountDownLatch(1);
+   FinalCount = new AtomicReference<>();
+   LastElement = -1;
+
// Print early (in some situations the process logs get 
too big
// for Travis and the root problem is not shown)
t.printStackTrace();
@@ -303,7 +309,6 @@ public class JobManagerCheckpointRecoveryITCase extends 
TestLogger {
fileStateBackendPath);
 
config.setInteger(ConfigConstants.LOCAL_NUMBER_JOB_MANAGER, 2);
-   config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100 s");
 
JobManagerProcess[] jobManagerProcess = new 
JobManagerProcess[2];
LeaderRetrievalService leaderRetrievalService = null;



[4/6] flink git commit: [hotfix] [tests] Log retry rule failures on warn level

2016-02-10 Thread sewen
[hotfix] [tests] Log retry rule failures on warn level


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/756cbaff
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/756cbaff
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/756cbaff

Branch: refs/heads/master
Commit: 756cbafff1fd25f67268ca84b62c8a479156bf88
Parents: 3a643c0
Author: Ufuk Celebi 
Authored: Tue Feb 9 11:25:37 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:27:28 2016 +0100

--
 .../test/java/org/apache/flink/testutils/junit/RetryRule.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/756cbaff/flink-core/src/test/java/org/apache/flink/testutils/junit/RetryRule.java
--
diff --git 
a/flink-core/src/test/java/org/apache/flink/testutils/junit/RetryRule.java 
b/flink-core/src/test/java/org/apache/flink/testutils/junit/RetryRule.java
index a4aff86..2b3a37a 100644
--- a/flink-core/src/test/java/org/apache/flink/testutils/junit/RetryRule.java
+++ b/flink-core/src/test/java/org/apache/flink/testutils/junit/RetryRule.java
@@ -113,7 +113,7 @@ public class RetryRule implements TestRule {
break; // success
}
catch (Throwable t) {
-   LOG.debug(String.format("Test run 
failed (%d/%d).",
+   LOG.warn(String.format("Test run failed 
(%d/%d).",
currentRun, 
timesOnFailure + 1), t);
 
// Throw the failure if retried too 
often
@@ -156,7 +156,7 @@ public class RetryRule implements TestRule {
break; // success
}
catch (Throwable t) {
-   LOG.debug(String.format("Test run 
failed (%d/%d).", currentRun, timesOnFailure + 1), t);
+   LOG.warn(String.format("Test run failed 
(%d/%d).", currentRun, timesOnFailure + 1), t);
 
if 
(!exceptionClass.isAssignableFrom(t.getClass()) || currentRun >= 
timesOnFailure) {
// Throw the failure if retried 
too often, or if it is the wrong exception



[3/6] flink git commit: [hotfix] [tests] Ignore ZooKeeper logs in process tests

2016-02-10 Thread sewen
[hotfix] [tests] Ignore ZooKeeper logs in process tests


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/3a643c07
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/3a643c07
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/3a643c07

Branch: refs/heads/master
Commit: 3a643c07792c62142c1f8cda172d4f4c3442c9b3
Parents: b8f4025
Author: Ufuk Celebi 
Authored: Tue Feb 9 11:01:39 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:27:16 2016 +0100

--
 .../JobManagerSubmittedJobGraphsRecoveryITCase.java  | 6 +-
 .../org/apache/flink/runtime/testutils/CommonTestUtils.java  | 1 +
 .../AbstractJobManagerProcessFailureRecoveryITCase.java  | 8 +---
 .../org/apache/flink/test/recovery/ChaosMonkeyITCase.java| 4 
 .../test/recovery/JobManagerCheckpointRecoveryITCase.java| 8 
 5 files changed, 23 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/3a643c07/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerSubmittedJobGraphsRecoveryITCase.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerSubmittedJobGraphsRecoveryITCase.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerSubmittedJobGraphsRecoveryITCase.java
index 99f7bd7..59c7c39 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerSubmittedJobGraphsRecoveryITCase.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/jobmanager/JobManagerSubmittedJobGraphsRecoveryITCase.java
@@ -343,6 +343,10 @@ public class JobManagerSubmittedJobGraphsRecoveryITCase 
extends TestLogger {
assertEquals(2, jobSubmitSuccessMessages);
}
catch (Throwable t) {
+   // Print early (in some situations the process logs get 
too big
+   // for Travis and the root problem is not shown)
+   t.printStackTrace();
+
// In case of an error, print the job manager process 
logs.
if (jobManagerProcess[0] != null) {
jobManagerProcess[0].printProcessLog();
@@ -352,7 +356,7 @@ public class JobManagerSubmittedJobGraphsRecoveryITCase 
extends TestLogger {
jobManagerProcess[1].printProcessLog();
}
 
-   t.printStackTrace();
+   throw t;
}
finally {
if (jobManagerProcess[0] != null) {

http://git-wip-us.apache.org/repos/asf/flink/blob/3a643c07/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
index 069b6af..bbb6a89 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
+++ 
b/flink-runtime/src/test/java/org/apache/flink/runtime/testutils/CommonTestUtils.java
@@ -147,6 +147,7 @@ public class CommonTestUtils {

writer.println("log4j.appender.console.layout=org.apache.log4j.PatternLayout");

writer.println("log4j.appender.console.layout.ConversionPattern=%-4r [%t] %-5p 
%c %x - %m%n");

writer.println("log4j.logger.org.eclipse.jetty.util.log=OFF");
+   writer.println("log4j.logger.org.apache.zookeeper=OFF");
 
writer.flush();
writer.close();

http://git-wip-us.apache.org/repos/asf/flink/blob/3a643c07/flink-tests/src/test/java/org/apache/flink/test/recovery/AbstractJobManagerProcessFailureRecoveryITCase.java
--
diff --git 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/AbstractJobManagerProcessFailureRecoveryITCase.java
 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/AbstractJobManagerProcessFailureRecoveryITCase.java
index 2f6b762..6122352 100644
--- 
a/flink-tests/src/test/java/org/apache/flink/test/recovery/AbstractJobManagerProcessFailureRecoveryITCase.java
+++ 
b/flink-tests/src/test/java/org/apache/flink/test/recovery/AbstractJobManagerProcessFailureRecoveryITCase.java
@@ -246,8 +246,10 @@ public abstract class 
AbstractJobManagerProcessFailureRecoveryITCase extends Tes
fail("The program encountered a " + 
error.getClass().getSimpleName() + " : " + error.getMessage());
 

[1/6] flink git commit: [FLINK-3341] Make 'auto.offset.reset' compatible with Kafka 0.8 and 0.9

2016-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 8ccd7544e -> 6968a57a1


[FLINK-3341] Make 'auto.offset.reset' compatible with Kafka 0.8 and 0.9

This closes #1597


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/9173825a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/9173825a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/9173825a

Branch: refs/heads/master
Commit: 9173825aa6a1525d72a78cda16cb4ae1e9b8a8e4
Parents: 8ccd754
Author: Robert Metzger 
Authored: Sat Feb 6 13:27:06 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:12:34 2016 +0100

--
 .../connectors/kafka/FlinkKafkaConsumer08.java  |  2 +-
 .../kafka/internals/LegacyFetcher.java  |  3 ++-
 .../connectors/kafka/Kafka08ITCase.java | 22 ++--
 .../kafka/KafkaTestEnvironmentImpl.java | 20 +++---
 .../kafka/KafkaTestEnvironmentImpl.java | 22 +---
 .../connectors/kafka/KafkaConsumerTestBase.java |  5 ++---
 .../connectors/kafka/KafkaTestBase.java |  2 --
 .../connectors/kafka/KafkaTestEnvironment.java  |  3 ---
 .../flink/yarn/YARNSessionFIFOITCase.java   |  1 +
 9 files changed, 25 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/9173825a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
--
diff --git 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
index bdea37f..1cdfffe 100644
--- 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
+++ 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumer08.java
@@ -70,7 +70,7 @@ import static 
com.google.common.base.Preconditions.checkNotNull;
  * socket.timeout.ms
  * socket.receive.buffer.bytes
  * fetch.message.max.bytes
- * auto.offset.reset with the values "latest", "earliest" 
(unlike 0.8.2 behavior)
+ * auto.offset.reset with the values "largest", "smallest"
  * fetch.wait.max.ms
  * 
  * 

http://git-wip-us.apache.org/repos/asf/flink/blob/9173825a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/LegacyFetcher.java
--
diff --git 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/LegacyFetcher.java
 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/LegacyFetcher.java
index fe7f777..10f4c41 100644
--- 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/LegacyFetcher.java
+++ 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/LegacyFetcher.java
@@ -576,7 +576,8 @@ public class LegacyFetcher implements Fetcher {
 
private static long getInvalidOffsetBehavior(Properties config) 
{
long timeType;
-   if 
(config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, 
"latest").equals("latest")) {
+   String val = 
config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
+   if (val.equals("largest") || val.equals("latest")) { // 
largest is kafka 0.8, latest is kafka 0.9
timeType = OffsetRequest.LatestTime();
} else {
timeType = OffsetRequest.EarliestTime();

http://git-wip-us.apache.org/repos/asf/flink/blob/9173825a/flink-streaming-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
--
diff --git 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink/streaming/connectors/kafka/Kafka08ITCase.java
index 6a2fa27..a3e815e 100644
--- 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/test/java/org/apache/flink

[6/6] flink git commit: [FLINK-3260] [runtime] Enforce terminal state of Executions

2016-02-10 Thread sewen
[FLINK-3260] [runtime] Enforce terminal state of Executions

This commit fixes the problem that Executions could leave their terminal state
FINISHED to transition to FAILED. Such a transition will be propagated to the
ExecutionGraph where it entails JobStatus changes. Since the Execution already
reached a terminal state, it should not again affect the ExecutionGraph. This
can lead to an inconsistent state in case of a restart where the old Executions
get disassociated from the ExecutionGraph.

This closes #1613


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/6968a57a
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/6968a57a
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/6968a57a

Branch: refs/heads/master
Commit: 6968a57a1a31a11b33bacd2c94d6559bcabd6eb9
Parents: 48b7454
Author: Till Rohrmann 
Authored: Tue Feb 9 10:30:12 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 15:34:37 2016 +0100

--
 .../flink/runtime/executiongraph/Execution.java |  14 +-
 .../ExecutionGraphRestartTest.java  |  90 +
 .../runtime/testingUtils/TestingCluster.scala   |   6 +-
 .../testingUtils/TestingTaskManagerLike.scala   |   4 +-
 .../runtime/testingUtils/TestingUtils.scala | 133 ++-
 5 files changed, 233 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/6968a57a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index eb2e68c..db037bb 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -435,7 +435,7 @@ public class Execution implements Serializable {
return;
}
else if (current == CREATED || current == SCHEDULED) {
-   // from here, we can directly switch to 
cancelled, because the no task has been deployed
+   // from here, we can directly switch to 
cancelled, because no task has been deployed
if (transitionState(current, CANCELED)) {

// we skip the canceling state. set the 
timestamp, for a consistent appearance
@@ -754,11 +754,10 @@ public class Execution implements Serializable {
return false;
}
 
-   if (current == CANCELED) {
-   // we are already aborting or are already 
aborted
+   if (current == CANCELED || current == FINISHED) {
+   // we are already aborting or are already 
aborted or we are already finished
if (LOG.isDebugEnabled()) {
-   LOG.debug(String.format("Ignoring 
transition of vertex %s to %s while being %s", 
-   getVertexWithAttempt(), 
FAILED, CANCELED));
+   LOG.debug("Ignoring transition of 
vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
@@ -928,6 +927,11 @@ public class Execution implements Serializable {
}
 
private boolean transitionState(ExecutionState currentState, 
ExecutionState targetState, Throwable error) {
+   // sanity check
+   if (currentState.isTerminal()) {
+   throw new IllegalStateException("Cannot leave terminal 
state " + currentState + " to transition to " + targetState + ".");
+   }
+
if (STATE_UPDATER.compareAndSet(this, currentState, 
targetState)) {
markTimestamp(targetState);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/6968a57a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
--
diff --git 
a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
 
b/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
index 0c3af8f..47a48a2 100644
--- 
a/flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphRestartTest.java
+++ 
b/flink-r

flink git commit: [FLINK-3107] [runtime] Start checkpoint ID counter with periodic scheduler

2016-02-10 Thread uce
Repository: flink
Updated Branches:
  refs/heads/master 6968a57a1 -> 8df0bbacb


[FLINK-3107] [runtime] Start checkpoint ID counter with periodic scheduler

Problem: The job manager enables checkpoints during submission of streaming
programs. This can lead to a call to 
`ZooKeeperCheckpointIDCounter.start()`,
which communicates with ZooKeeper. This can block the job manager actor.

Solution: Start the counter in the `CheckpointCoordinatorDeActivator`.

This closes #1610.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/8df0bbac
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/8df0bbac
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/8df0bbac

Branch: refs/heads/master
Commit: 8df0bbacb8712342471c12cbf765a0a92b70abc9
Parents: 6968a57
Author: Ufuk Celebi 
Authored: Tue Feb 9 16:06:46 2016 +0100
Committer: Ufuk Celebi 
Committed: Wed Feb 10 19:51:59 2016 +0100

--
 .../flink/runtime/checkpoint/CheckpointCoordinator.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/8df0bbac/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
--
diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
index 9963a20..b0e23d6 100644
--- 
a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
+++ 
b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
@@ -197,9 +197,9 @@ public class CheckpointCoordinator {
this.completedCheckpointStore = 
checkNotNull(completedCheckpointStore);
this.recentPendingCheckpoints = new 
ArrayDeque(NUM_GHOST_CHECKPOINT_IDS);
this.userClassLoader = userClassLoader;
-   this.checkpointIdCounter = checkNotNull(checkpointIDCounter);
 
-   checkpointIDCounter.start();
+   // Started with the periodic scheduler
+   this.checkpointIdCounter = checkNotNull(checkpointIDCounter);
 
this.timer = new Timer("Checkpoint Timer", true);
 
@@ -862,6 +862,14 @@ public class CheckpointCoordinator {
// make sure all prior timers are cancelled
stopCheckpointScheduler();
 
+   try {
+   // Multiple start calls are OK
+   checkpointIdCounter.start();
+   } catch (Exception e) {
+   String msg = "Failed to start checkpoint ID 
counter: " + e.getMessage();
+   throw new RuntimeException(msg, e);
+   }
+
periodicScheduling = true;
currentPeriodicTrigger = new ScheduledTrigger();
timer.scheduleAtFixedRate(currentPeriodicTrigger, 
baseInterval, baseInterval);



[1/2] flink git commit: [FLINK-3371] [api-breaking] Move TriggerResult and TriggerContext to dedicated classes

2016-02-10 Thread sewen
Repository: flink
Updated Branches:
  refs/heads/master 8df0bbacb -> fd324ea72


[FLINK-3371] [api-breaking] Move TriggerResult and TriggerContext to dedicated 
classes

This closes #1603


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/50bd65a5
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/50bd65a5
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/50bd65a5

Branch: refs/heads/master
Commit: 50bd65a574776817a03dd32fd438cb2327447109
Parents: 8df0bba
Author: Stephan Ewen 
Authored: Sun Feb 7 21:46:16 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 22:15:31 2016 +0100

--
 .../examples/windowing/SessionWindowing.java|   3 +-
 .../api/windowing/assigners/GlobalWindows.java  |  10 +-
 .../triggers/ContinuousEventTimeTrigger.java|   7 +-
 .../ContinuousProcessingTimeTrigger.java|   2 +-
 .../api/windowing/triggers/CountTrigger.java|   2 +-
 .../api/windowing/triggers/DeltaTrigger.java|   7 +-
 .../windowing/triggers/EventTimeTrigger.java|   5 +-
 .../triggers/ProcessingTimeTrigger.java |   5 +-
 .../api/windowing/triggers/PurgingTrigger.java  |   4 +-
 .../api/windowing/triggers/Trigger.java | 102 +--
 .../api/windowing/triggers/TriggerResult.java   |  96 +
 .../windowing/EvictingWindowOperator.java   |   5 +-
 .../windowing/NonKeyedWindowOperator.java   |  34 ---
 .../operators/windowing/WindowOperator.java |  18 ++--
 14 files changed, 179 insertions(+), 121 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/50bd65a5/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java
--
diff --git 
a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java
 
b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java
index bd82800..e2df160 100644
--- 
a/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java
+++ 
b/flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/windowing/SessionWindowing.java
@@ -27,6 +27,7 @@ import 
org.apache.flink.streaming.api.functions.source.EventTimeSourceFunction;
 import org.apache.flink.streaming.api.watermark.Watermark;
 import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows;
 import org.apache.flink.streaming.api.windowing.triggers.Trigger;
+import org.apache.flink.streaming.api.windowing.triggers.TriggerResult;
 import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
 
 import java.util.ArrayList;
@@ -95,7 +96,7 @@ public class SessionWindowing {
env.execute();
}
 
-   private static class SessionTrigger implements Trigger, GlobalWindow> {
+   private static class SessionTrigger extends Trigger, GlobalWindow> {
 
private static final long serialVersionUID = 1L;
 

http://git-wip-us.apache.org/repos/asf/flink/blob/50bd65a5/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/assigners/GlobalWindows.java
--
diff --git 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/assigners/GlobalWindows.java
 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/assigners/GlobalWindows.java
index d3eb2ac..a4d92cf 100644
--- 
a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/assigners/GlobalWindows.java
+++ 
b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/windowing/assigners/GlobalWindows.java
@@ -21,6 +21,7 @@ import org.apache.flink.api.common.ExecutionConfig;
 import org.apache.flink.api.common.typeutils.TypeSerializer;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.streaming.api.windowing.triggers.Trigger;
+import org.apache.flink.streaming.api.windowing.triggers.TriggerResult;
 import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;
 
 import java.util.Collection;
@@ -67,15 +68,12 @@ public class GlobalWindows extends WindowAssigner {
/**
 * A trigger that never fires, as default Trigger for GlobalWindows.
 */
-   private static class NeverTrigger implements Trigger {
+   private static class NeverTrigger extends Trigger 
{
private static final long serialVersionUID = 1L;
 
@Override
-   public TriggerResult onElement(Object element,
-   long timestamp,
-

[2/2] flink git commit: [FLINK-3384] [kafka] Add ClosableQueue for message exchanges between Kafka Threads

2016-02-10 Thread sewen
[FLINK-3384] [kafka] Add ClosableQueue for message exchanges between Kafka 
Threads


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/fd324ea7
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/fd324ea7
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/fd324ea7

Branch: refs/heads/master
Commit: fd324ea72979cc3d4202ffa3ea174ec4cc9d153b
Parents: 50bd65a
Author: Stephan Ewen 
Authored: Wed Feb 10 14:51:10 2016 +0100
Committer: Stephan Ewen 
Committed: Wed Feb 10 22:15:32 2016 +0100

--
 .../kafka/internals/ClosableBlockingQueue.java  | 502 +++
 .../internals/ClosableBlockingQueueTest.java| 603 +++
 2 files changed, 1105 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/fd324ea7/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
--
diff --git 
a/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
new file mode 100644
index 000..856c2ad
--- /dev/null
+++ 
b/flink-streaming-connectors/flink-connector-kafka-0.8/src/main/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueue.java
@@ -0,0 +1,502 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kafka.internals;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A special form of blocking queue with two additions:
+ * 
+ * The queue can be closed atomically when empty. Adding elements 
after the queue
+ * is closed fails. This allows queue consumers to atomically discover 
that no elements
+ * are available and mark themselves as shut down.
+ * The queue allows to poll batches of elements in one polling 
call.
+ * 
+ * 
+ * The queue has no capacity restriction and is safe for multiple producers 
and consumers.
+ * 
+ * Note: Null elements are prohibited.
+ * 
+ * @param  The type of elements in the queue.
+ */
+public class ClosableBlockingQueue {
+
+   /** The lock used to make queue accesses and open checks atomic */
+   private final ReentrantLock lock;
+   
+   /** The condition on which blocking get-calls wait if the queue is 
empty */
+   private final Condition nonEmpty;
+   
+   /** The deque of elements */
+   private final ArrayDeque elements;
+   
+   /** Flag marking the status of the queue */
+   private volatile boolean open;
+   
+   // 

+
+   /**
+* Creates a new empty queue.
+*/
+   public ClosableBlockingQueue() {
+   this(10);
+   }
+
+   /**
+* Creates a new empty queue, reserving space for at least the 
specified number
+* of elements. The queue can still grow if more elements are added 
than the
+* reserved space.
+* 
+* @param initialSize The number of elements to reserve space for.
+*/
+   public ClosableBlockingQueue(int initialSize) {
+   this.lock = new ReentrantLock(true);
+   this.nonEmpty = this.lock.newCondition();
+   
+   this.elements = new ArrayDeque<>(initialSize);
+   this.open = true;
+   
+   
+   }
+
+   /**
+* Creates a new queue that contains the given elements.
+  

svn commit: r12334 - /release/flink/flink-0.10.2/

2016-02-10 Thread uce
Author: uce
Date: Wed Feb 10 22:30:50 2016
New Revision: 12334

Log:
Add Flink 0.10.2 release files

Added:
release/flink/flink-0.10.2/
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.sha
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.sha
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop24.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop24.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop24.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop24.tgz.sha
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop26.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop26.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop26.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop26.tgz.sha
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop27.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop27.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop27.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop27.tgz.sha
release/flink/flink-0.10.2/flink-0.10.2-src.tgz   (with props)
release/flink/flink-0.10.2/flink-0.10.2-src.tgz.asc
release/flink/flink-0.10.2/flink-0.10.2-src.tgz.md5
release/flink/flink-0.10.2/flink-0.10.2-src.tgz.sha

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz
==
Binary file - no diff available.

Propchange: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz
--
svn:mime-type = application/octet-stream

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.asc
==
--- release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.asc (added)
+++ release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.asc Wed Feb 10 
22:30:50 2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABAgAGBQJWuKu/AAoJEN4+D0ydQDMJEUcQALLjoLKcxedEhasdHAda56DS
+wRSt7G3j26OsxUYxcAnWtJ4JCGYbALJ6RMjNundFVZpUcxH0qe2Y/sy8up/Ks5v+
+Ws2KiYbnWtoIDSXkYSL3iR2/aRRw342I6aJa2tf40+8pPMUZdC2Z1rahgxqhMoKH
+Ol+DEdtg2OVk5BWYQ9/IE2sl6wX1xnHtDpGEghqGGqZUQqkUdee8aK2QjG3Tlaqi
+WA3VkDXzbZ6FIcwKoJFcJMiniCEjxjVp9xIFU5XyeHxu7Tp/iu6ZQhROVqfWVqTi
+Wb8M6s8ojanTFOGQyKHLLC3kxMJ3v2DK2JNEG+/K1dtCfcSd//PrKhFsv1Qiuud4
+FbCox2WsXxc0uZHqgs2QWwCAnJZ4UP63lsevvK2l2yB5c3fyqIQ+AFqeuklMsaKS
+f1GXQ1pjvM24Ya9DpvObsZ2tOY2LHGDvuFx5M3dRSAi+cWM3fh58/6z+nK0EhjqJ
+izzfBl0c5LQGapsqObknMnSW6N1QUBMWwlv9664zYftCkIAeZrLIHytjzoIVUT5d
+0kdm03ULdA0j3XTqIY6VlQEgedgNIjRCAmmfKBi/EAXpOqnNnTgkwAqdVzSZ6egO
+afVSErpk+8sQpLXjfua26MhyxzjsjuJ8ukWQNumTCuUybAEJbFABk+uMak0JYaI1
+xqd0TlKOxZpM3mIZS683
+=ztao
+-END PGP SIGNATURE-

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.md5
==
--- release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.md5 (added)
+++ release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.md5 Wed Feb 10 
22:30:50 2016
@@ -0,0 +1 @@
+854f274118664dc6d4becb5474a245d8  flink-0.10.2-bin-hadoop1.tgz

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.sha
==
--- release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.sha (added)
+++ release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop1.tgz.sha Wed Feb 10 
22:30:50 2016
@@ -0,0 +1 @@
+71b9577d7f5a2cafec6da91b7bd74707ae3651483dd799ad0a77787414fcc0241fc52ce4ce299e36e310332ed8c50a8bfe4cc034f66522c286b059af550a76d8
  flink-0.10.2-bin-hadoop1.tgz

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz
==
Binary file - no diff available.

Propchange: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz
--
svn:mime-type = application/octet-stream

Added: release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.asc
==
--- release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.asc (added)
+++ release/flink/flink-0.10.2/flink-0.10.2-bin-hadoop2.tgz.asc Wed Feb 10 
22:30:50 2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABAgAGBQJWuK4UAAoJEN4+D0ydQDMJNHAP/0hCU25eeDDSMA3n5ve3gZmd
+rOoRs8z1TpocbphYmdmGEgz0TSOaqz5N+Xkd/TLwhLlzOEBc1lE8Ggbf4vUeU