Repository: flink
Updated Branches:
  refs/heads/master 24817c8cd -> 427de663c


[hotfix] Minor edits to comments in code


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/427de663
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/427de663
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/427de663

Branch: refs/heads/master
Commit: 427de663ce3d27c02cdeeef8375d1d04bfc67a4c
Parents: 24817c8
Author: Greg Hogan <c...@greghogan.com>
Authored: Tue Apr 12 10:51:36 2016 -0400
Committer: Greg Hogan <c...@greghogan.com>
Committed: Tue Apr 12 10:51:36 2016 -0400

----------------------------------------------------------------------
 .travis.yml                                                   | 2 +-
 .../src/main/java/org/apache/flink/api/common/Plan.java       | 2 +-
 .../flink/api/java/typeutils/runtime/TupleSerializerBase.java | 2 +-
 flink-dist/src/main/flink-bin/bin/config.sh                   | 2 +-
 .../main/java/org/apache/flink/graph/EdgeJoinFunction.java    | 1 +
 .../src/main/java/org/apache/flink/optimizer/Optimizer.java   | 4 ++--
 .../main/java/org/apache/flink/optimizer/dag/FilterNode.java  | 2 +-
 .../runtime/io/disk/iomanager/ChannelWriterOutputView.java    | 4 ++--
 .../apache/flink/runtime/operators/MapPartitionDriver.java    | 2 +-
 .../apache/flink/runtime/operators/hash/MutableHashTable.java | 7 ++++---
 .../flink/runtime/operators/sort/NormalizedKeySorter.java     | 2 +-
 11 files changed, 16 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/.travis.yml
----------------------------------------------------------------------
diff --git a/.travis.yml b/.travis.yml
index 0810fb7..f6fdb78 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,7 +13,7 @@ language: java
 
 #See https://issues.apache.org/jira/browse/FLINK-1072
 # NOTE: When changing the build matrix, please also check with the tools/deploy_to_maven.sh file
-# The file assumes a certain build order for the maven / nigthly build deployments.
+# The file assumes a certain build order for the maven / nightly build deployments.
 matrix:
   include:
     - jdk: "oraclejdk8"

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/Plan.java b/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
index 3e5cb61..03ada8a 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/Plan.java
@@ -288,7 +288,7 @@ public class Plan implements Visitable<Operator<?>> {
         */
        public void setDefaultParallelism(int defaultParallelism) {
                checkArgument(defaultParallelism >= 1 || defaultParallelism == -1,
-                       "The default parallelism must be positive, or -1 if the system should use the globally comfigured default.");
+                       "The default parallelism must be positive, or -1 if the system should use the globally configured default.");
                
                this.defaultParallelism = defaultParallelism;
        }
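
The corrected message documents a simple contract: the default parallelism must be a positive integer, or -1 as a sentinel for "use the globally configured default". A minimal standalone Java sketch of that validation (hypothetical helper class, not Flink's actual code path beyond the checkArgument shown above):

    // Hypothetical sketch of the parallelism contract described above.
    public final class ParallelismCheck {

        /** Sentinel meaning "use the globally configured default parallelism". */
        static final int USE_DEFAULT = -1;

        static int validate(int parallelism) {
            if (parallelism < 1 && parallelism != USE_DEFAULT) {
                throw new IllegalArgumentException(
                        "The default parallelism must be positive, or -1 if the system"
                                + " should use the globally configured default.");
            }
            return parallelism;
        }

        public static void main(String[] args) {
            System.out.println(validate(4));            // 4
            System.out.println(validate(USE_DEFAULT));  // -1
        }
    }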

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerBase.java
----------------------------------------------------------------------
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerBase.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerBase.java
index 8b1d8ca..5b5d462 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerBase.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/TupleSerializerBase.java
@@ -78,7 +78,7 @@ public abstract class TupleSerializerBase<T> extends TypeSerializer<T> {
        }
 
        // We use this in the Aggregate and Distinct Operators to create instances
-       // of immutable Typles (i.e. Scala Tuples)
+       // of immutable Tuples (i.e. Scala Tuples)
        public abstract T createInstance(Object[] fields);
 
        public abstract T createOrReuseInstance(Object[] fields, T reuse);

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-dist/src/main/flink-bin/bin/config.sh
----------------------------------------------------------------------
diff --git a/flink-dist/src/main/flink-bin/bin/config.sh b/flink-dist/src/main/flink-bin/bin/config.sh
index 43619ce..ffbec07 100755
--- a/flink-dist/src/main/flink-bin/bin/config.sh
+++ b/flink-dist/src/main/flink-bin/bin/config.sh
@@ -271,7 +271,7 @@ if [ -n "${HBASE_CONF_DIR}" ]; then
 fi
 
 # Auxilliary function which extracts the name of host from a line which
-# also potentialy includes topology information and the taskManager type
+# also potentially includes topology information and the taskManager type
 extractHostName() {
     # handle comments: extract first part of string (before first # character)
     SLAVE=`echo $1 | cut -d'#' -f 1`

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/EdgeJoinFunction.java
----------------------------------------------------------------------
diff --git a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/EdgeJoinFunction.java b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/EdgeJoinFunction.java
index 68d6e53..698b3b6 100644
--- a/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/EdgeJoinFunction.java
+++ b/flink-libraries/flink-gelly/src/main/java/org/apache/flink/graph/EdgeJoinFunction.java
@@ -21,6 +21,7 @@ package org.apache.flink.graph;
 import java.io.Serializable;
 
 import org.apache.flink.api.common.functions.Function;
+import org.apache.flink.api.java.DataSet;
 
 /**
  * Interface to be implemented by the transformation function

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-optimizer/src/main/java/org/apache/flink/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/Optimizer.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/Optimizer.java
index d668cbe..f73abe2 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/Optimizer.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/Optimizer.java
@@ -473,12 +473,12 @@ public class Optimizer {
                }
 
                // now that we have all nodes created and recorded which ones consume memory, tell the nodes their minimal
-               // guaranteed memory, for further cost estimations. we assume an equal distribution of memory among consumer tasks
+               // guaranteed memory, for further cost estimations. We assume an equal distribution of memory among consumer tasks
                rootNode.accept(new IdAndEstimatesVisitor(this.statistics));
 
                // We are dealing with operator DAGs, rather than operator trees.
                // That requires us to deviate at some points from the classical DB optimizer algorithms.
-               // This step build some auxiliary structures to help track branches and joins in the DAG
+               // This step builds auxiliary structures to help track branches and joins in the DAG
                BranchesVisitor branchingVisitor = new BranchesVisitor();
                rootNode.accept(branchingVisitor);
 

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/FilterNode.java
----------------------------------------------------------------------
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/FilterNode.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/FilterNode.java
index cb4e7bd..89bd337 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/FilterNode.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/FilterNode.java
@@ -29,7 +29,7 @@ import org.apache.flink.optimizer.operators.FilterDescriptor;
 import org.apache.flink.optimizer.operators.OperatorDescriptorSingle;
 
 /**
- * The optimizer's internal representation of a <i>FlatMap</i> operator node.
+ * The optimizer's internal representation of a <i>Filter</i> operator node.
  */
 public class FilterNode extends SingleInputNode {
        

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/ChannelWriterOutputView.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/ChannelWriterOutputView.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/ChannelWriterOutputView.java
index 64b2ebc..b469cc1 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/ChannelWriterOutputView.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/ChannelWriterOutputView.java
@@ -29,7 +29,7 @@ import org.apache.flink.runtime.memory.AbstractPagedOutputView;
 /**
  * A {@link org.apache.flink.core.memory.DataOutputView} that is backed by a
  * {@link BlockChannelWriter}, making it effectively a data output
- * stream. The view writes it data in blocks to the underlying channel, adding a minimal header to each block.
+ * stream. The view writes its data in blocks to the underlying channel, adding a minimal header to each block.
  * The data can be re-read by a {@link ChannelReaderInputView}, if it uses the same block size.
  */
 public final class ChannelWriterOutputView extends AbstractPagedOutputView {
@@ -116,7 +116,7 @@ public final class ChannelWriterOutputView extends AbstractPagedOutputView {
        
        /**
         * Creates an new ChannelWriterOutputView that writes to the given channel. It uses only a single
-        * memory segment for the buffering, which it takes from the writers return queue.
+        * memory segment for the buffering, which it takes from the writer's return queue.
         * Note that this variant locks if no buffers are contained in the return queue.
         * 
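
The corrected Javadoc describes a block-oriented output view: data is written in fixed-size blocks, each carrying a minimal header, and a reader using the same block size can re-read it. A conceptual round-trip sketch of that idea follows; the class name, header layout, and block size here are illustrative assumptions, not Flink's actual on-disk format:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    // Illustrative block round trip: a fixed-size block whose first bytes record
    // the payload length, so a reader with the same block size can recover the data.
    final class BlockSketch {
        static final int BLOCK_SIZE = 64;

        static byte[] writeBlock(byte[] payload) {
            ByteBuffer block = ByteBuffer.allocate(BLOCK_SIZE);
            block.putInt(payload.length); // minimal header: payload length
            block.put(payload);           // data; the rest of the block stays unused
            return block.array();
        }

        static byte[] readBlock(byte[] block) {
            ByteBuffer buf = ByteBuffer.wrap(block);
            byte[] payload = new byte[buf.getInt()]; // header says how much to read
            buf.get(payload);
            return payload;
        }

        public static void main(String[] args) {
            byte[] data = "hello blocks".getBytes(StandardCharsets.UTF_8);
            System.out.println(new String(readBlock(writeBlock(data)), StandardCharsets.UTF_8));
        }
    }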

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-runtime/src/main/java/org/apache/flink/runtime/operators/MapPartitionDriver.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/MapPartitionDriver.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/MapPartitionDriver.java
index 8792ef1..8f245f0 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/MapPartitionDriver.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/MapPartitionDriver.java
@@ -55,7 +55,7 @@ public class MapPartitionDriver<IT, OT> implements Driver<MapPartitionFunction<I
        }
 
        @Override
-               public int getNumberOfInputs() {
+       public int getNumberOfInputs() {
                return 1;
        }
 

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
index 7d1fc11..1495ee1 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
@@ -1749,10 +1749,11 @@ public class MutableHashTable<BT, PT> implements MemorySegmentSource {
                }
                
                public BT next(BT reuse) {
-                       // search unprobed record in bucket, while found none, move to next bucket and search.
+                       // search unprobed record in bucket, while none found move to next bucket and search.
                        while (true) {
                                BT result = nextInBucket(reuse);
                                if (result == null) {
+                                       // return null while there are no more buckets.
                                        if (!moveToNextOnHeapBucket()) {
                                                return null;
                                        }
@@ -1763,11 +1764,11 @@ public class MutableHashTable<BT, PT> implements MemorySegmentSource {
                }
                
                public BT next() {
-                       // search unProbed record in bucket, while found none, move to next bucket and search.
+                       // search unprobed record in bucket, while none found move to next bucket and search.
                        while (true) {
                                BT result = nextInBucket();
                                if (result == null) {
-                                       // return null while there is no more bucket.
+                                       // return null while there are no more buckets.
                                        if (!moveToNextOnHeapBucket()) {
                                                return null;
                                        }
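
The corrected comments spell out the probe-side scan: look for an unprobed record in the current bucket, advance to the next bucket when the current one is exhausted, and return null once no buckets remain. A generic sketch of that loop shape, using hypothetical names rather than the hash table's real data structures:

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.List;

    // Generic sketch of the bucket scan described above: drain the current bucket,
    // advance to the next bucket when it is empty, return null when none are left.
    final class BucketScanSketch<T> {
        private final Iterator<List<T>> buckets;
        private Iterator<T> current = Collections.emptyIterator();

        BucketScanSketch(List<List<T>> allBuckets) {
            this.buckets = allBuckets.iterator();
        }

        T next() {
            while (true) {
                if (current.hasNext()) {
                    return current.next();           // found a record in this bucket
                }
                if (!buckets.hasNext()) {
                    return null;                     // no more buckets
                }
                current = buckets.next().iterator(); // move to the next bucket
            }
        }
    }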

http://git-wip-us.apache.org/repos/asf/flink/blob/427de663/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java
----------------------------------------------------------------------
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java
index 9e1882c..2cade8d 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java
@@ -361,7 +361,7 @@ public final class NormalizedKeySorter<T> implements InMemorySorter<T> {
                }
                
                final long pointerI = segI.getLong(segmentOffsetI) & POINTER_MASK;
-               final long pointerJ = segJ.getLong(segmentOffsetJ)  & POINTER_MASK;
+               final long pointerJ = segJ.getLong(segmentOffsetJ) & POINTER_MASK;
                
                return compareRecords(pointerI, pointerJ);
        }
