Repository: spark
Updated Branches:
  refs/heads/master 9023015f0 -> 3f749f7ed


[SPARK-14355][BUILD] Fix typos in Exception/Testcase/Comments and static 
analysis results

## What changes were proposed in this pull request?

This PR contains the following 5 types of maintenance fixes over 59 files (+94 
lines, -93 lines).
- Fix typos (exception/log strings, test case names, comments) in 44 lines.
- Fix lint-java errors (MaxLineLength) in 6 lines. (New code added after SPARK-14011.)
- Use diamond operators in 40 lines. (New code added after SPARK-13702; see the
sketch after this list.)
- Fix redundant semicolons in 5 lines.
- Rename class `InferSchemaSuite` to `CSVInferSchemaSuite` in 
CSVInferSchemaSuite.scala.
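
For reference, the diamond-operator and redundant-semicolon cleanups follow the
pattern sketched below. This is a minimal illustrative example (the class and
field names are hypothetical, not taken from the patch): since Java 7, the
compiler infers the generic type arguments on the right-hand side of `new` from
the declared type, so repeating them is redundant, and a type body needs no
trailing semicolon.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ConnectionRegistry {                        // hypothetical example class

  /** Redundant-semicolon fix: no trailing ';' after a type body. */
  enum State { IDLE, ACTIVE }                     // was: enum State { ... };

  private final Map<String, Integer> connections;

  ConnectionRegistry() {
    // Before: this.connections = new ConcurrentHashMap<String, Integer>();
    // After:  the diamond operator lets the compiler infer <String, Integer>.
    this.connections = new ConcurrentHashMap<>();
  }
}
```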

## How was this patch tested?

Tested manually and verified that the Jenkins tests pass.

Author: Dongjoon Hyun <dongj...@apache.org>

Closes #12139 from dongjoon-hyun/SPARK-14355.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/3f749f7e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/3f749f7e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/3f749f7e

Branch: refs/heads/master
Commit: 3f749f7ed443899d667c9e2b2a11bc595d6fc7f6
Parents: 9023015
Author: Dongjoon Hyun <dongj...@apache.org>
Authored: Sun Apr 3 18:14:16 2016 -0700
Committer: Reynold Xin <r...@databricks.com>
Committed: Sun Apr 3 18:14:16 2016 -0700

----------------------------------------------------------------------
 .../spark/network/client/TransportClientFactory.java  |  2 +-
 .../network/client/TransportResponseHandler.java      |  6 +++---
 .../spark/network/server/OneForOneStreamManager.java  |  2 +-
 .../spark/network/sasl/ShuffleSecretManager.java      |  2 +-
 .../unsafe/sort/UnsafeSorterSpillMerger.java          |  2 +-
 .../main/scala/org/apache/spark/api/r/RRunner.scala   |  2 +-
 .../org/apache/spark/util/random/RandomSampler.scala  |  2 +-
 .../spark/shuffle/sort/UnsafeShuffleWriterSuite.java  |  4 ++--
 .../java/org/apache/spark/examples/JavaLogQuery.java  |  4 ++--
 .../JavaMultiLabelClassificationMetricsExample.java   | 14 +++++++-------
 .../mllib/JavaPowerIterationClusteringExample.java    | 10 +++++-----
 .../examples/mllib/JavaStratifiedSamplingExample.java |  2 +-
 .../spark/examples/streaming/JavaFlumeEventCount.java |  4 ++--
 .../spark/streaming/flume/JavaFlumeStreamSuite.java   | 11 ++++++-----
 .../apache/spark/launcher/CommandBuilderUtils.java    |  2 +-
 .../java/org/apache/spark/launcher/SparkLauncher.java |  2 +-
 .../apache/spark/launcher/LauncherServerSuite.java    |  2 +-
 .../launcher/SparkSubmitCommandBuilderSuite.java      |  2 +-
 .../ml/classification/DecisionTreeClassifier.scala    |  2 +-
 .../spark/ml/param/shared/SharedParamsCodeGen.scala   |  2 +-
 .../apache/spark/ml/param/shared/sharedParams.scala   |  4 ++--
 .../spark/ml/regression/DecisionTreeRegressor.scala   |  2 +-
 .../scala/org/apache/spark/ml/tree/treeModels.scala   |  2 +-
 .../mllib/classification/LogisticRegression.scala     |  2 +-
 .../org/apache/spark/ml/param/JavaTestParams.java     |  2 +-
 .../JavaStreamingLogisticRegressionSuite.java         |  4 ++--
 .../mllib/clustering/JavaStreamingKMeansSuite.java    |  4 ++--
 .../apache/spark/mllib/linalg/JavaVectorsSuite.java   |  4 ++--
 .../JavaStreamingLinearRegressionSuite.java           |  4 ++--
 .../spark/ml/regression/LinearRegressionSuite.scala   |  2 +-
 .../spark/sql/execution/UnsafeExternalRowSorter.java  |  2 +-
 .../org/apache/spark/sql/catalyst/CatalystConf.scala  |  2 +-
 .../spark/sql/catalyst/expressions/Expression.scala   |  4 ++--
 .../catalyst/expressions/codegen/CodeGenerator.scala  |  2 +-
 .../catalyst/expressions/conditionalExpressions.scala |  2 +-
 .../apache/spark/sql/catalyst/parser/AstBuilder.scala |  4 ++--
 .../src/test/scala/org/apache/spark/sql/RowTest.scala |  2 +-
 .../spark/sql/execution/UnsafeKVExternalSorter.java   |  2 +-
 .../spark/sql/execution/vectorized/ColumnarBatch.java |  4 ++--
 .../sql/execution/vectorized/OnHeapColumnVector.java  |  2 +-
 .../scala/org/apache/spark/sql/ContinuousQuery.scala  |  2 +-
 .../src/main/scala/org/apache/spark/sql/Dataset.scala |  2 +-
 .../apache/spark/sql/execution/SparkStrategies.scala  |  2 +-
 .../scala/org/apache/spark/sql/execution/Window.scala |  2 +-
 .../apache/spark/sql/execution/basicOperators.scala   |  2 +-
 .../spark/sql/execution/columnar/ColumnBuilder.scala  |  2 +-
 .../spark/sql/execution/stat/StatFunctions.scala      |  2 +-
 .../sql/execution/streaming/FileStreamSink.scala      |  2 +-
 .../state/HDFSBackedStateStoreProvider.scala          |  2 +-
 .../sql/execution/streaming/state/StateStore.scala    |  2 +-
 .../org/apache/spark/sql/jdbc/JdbcDialects.scala      |  2 +-
 .../test/org/apache/spark/sql/JavaDatasetSuite.java   | 10 +++++-----
 .../test/scala/org/apache/spark/sql/QueryTest.scala   |  4 ++--
 .../datasources/csv/CSVInferSchemaSuite.scala         |  2 +-
 .../datasources/parquet/ParquetIOSuite.scala          |  2 +-
 .../apache/spark/sql/streaming/FileStressSuite.scala  |  2 +-
 .../org/apache/spark/sql/hive/test/TestHive.scala     |  4 ++--
 .../spark/sql/hive/execution/HiveComparisonTest.scala |  2 +-
 .../org/apache/spark/sql/hive/parquetSuites.scala     |  4 ++--
 59 files changed, 94 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
----------------------------------------------------------------------
diff --git 
a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
 
b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
index 5a36e18..b5a9d66 100644
--- 
a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
+++ 
b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
@@ -94,7 +94,7 @@ public class TransportClientFactory implements Closeable {
     this.context = Preconditions.checkNotNull(context);
     this.conf = context.getConf();
     this.clientBootstraps = 
Lists.newArrayList(Preconditions.checkNotNull(clientBootstraps));
-    this.connectionPool = new ConcurrentHashMap<SocketAddress, ClientPool>();
+    this.connectionPool = new ConcurrentHashMap<>();
     this.numConnectionsPerPeer = conf.numConnectionsPerPeer();
     this.rand = new Random();
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
----------------------------------------------------------------------
diff --git 
a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
 
b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
index f0e2004..8a69223 100644
--- 
a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
+++ 
b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
@@ -64,9 +64,9 @@ public class TransportResponseHandler extends 
MessageHandler<ResponseMessage> {
 
   public TransportResponseHandler(Channel channel) {
     this.channel = channel;
-    this.outstandingFetches = new ConcurrentHashMap<StreamChunkId, 
ChunkReceivedCallback>();
-    this.outstandingRpcs = new ConcurrentHashMap<Long, RpcResponseCallback>();
-    this.streamCallbacks = new ConcurrentLinkedQueue<StreamCallback>();
+    this.outstandingFetches = new ConcurrentHashMap<>();
+    this.outstandingRpcs = new ConcurrentHashMap<>();
+    this.streamCallbacks = new ConcurrentLinkedQueue<>();
     this.timeOfLastRequestNs = new AtomicLong(0);
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
----------------------------------------------------------------------
diff --git 
a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
 
b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
index e2222ae..ae7e520 100644
--- 
a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
+++ 
b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
@@ -63,7 +63,7 @@ public class OneForOneStreamManager extends StreamManager {
     // For debugging purposes, start with a random stream id to help 
identifying different streams.
     // This does not need to be globally unique, only unique to this class.
     nextStreamId = new AtomicLong((long) new 
Random().nextInt(Integer.MAX_VALUE) * 1000);
-    streams = new ConcurrentHashMap<Long, StreamState>();
+    streams = new ConcurrentHashMap<>();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java
----------------------------------------------------------------------
diff --git 
a/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java
 
b/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java
index 268cb40..56a025c 100644
--- 
a/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java
+++ 
b/common/network-shuffle/src/main/java/org/apache/spark/network/sasl/ShuffleSecretManager.java
@@ -37,7 +37,7 @@ public class ShuffleSecretManager implements SecretKeyHolder {
   private static final String SPARK_SASL_USER = "sparkSaslUser";
 
   public ShuffleSecretManager() {
-    shuffleSecretMap = new ConcurrentHashMap<String, String>();
+    shuffleSecretMap = new ConcurrentHashMap<>();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java
 
b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java
index 2b1c860..01aed95 100644
--- 
a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java
+++ 
b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java
@@ -45,7 +45,7 @@ final class UnsafeSorterSpillMerger {
         }
       }
     };
-    priorityQueue = new PriorityQueue<UnsafeSorterIterator>(numSpills, 
comparator);
+    priorityQueue = new PriorityQueue<>(numSpills, comparator);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/core/src/main/scala/org/apache/spark/api/r/RRunner.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/r/RRunner.scala 
b/core/src/main/scala/org/apache/spark/api/r/RRunner.scala
index ff279ec..07d1fa2 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RRunner.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RRunner.scala
@@ -182,7 +182,7 @@ private[spark] class RRunner[U](
           }
           stream.flush()
         } catch {
-          // TODO: We should propogate this error to the task thread
+          // TODO: We should propagate this error to the task thread
           case e: Exception =>
             logError("R Writer thread got an exception", e)
         } finally {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala 
b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
index d397cca..8c67364 100644
--- a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
@@ -326,7 +326,7 @@ class GapSamplingReplacement(
   /**
    * Skip elements with replication factor zero (i.e. elements that won't be 
sampled).
    * Samples 'k' from geometric distribution  P(k) = (1-q)(q)^k, where q = 
e^(-f), that is
-   * q is the probabililty of Poisson(0; f)
+   * q is the probability of Poisson(0; f)
    */
   private def advance(): Unit = {
     val u = math.max(rng.nextDouble(), epsilon)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
----------------------------------------------------------------------
diff --git 
a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
 
b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index 44733dc..30750b1 100644
--- 
a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ 
b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -170,11 +170,11 @@ public class UnsafeShuffleWriterSuite {
   private UnsafeShuffleWriter<Object, Object> createWriter(
       boolean transferToEnabled) throws IOException {
     conf.set("spark.file.transferTo", String.valueOf(transferToEnabled));
-    return new UnsafeShuffleWriter<Object, Object>(
+    return new UnsafeShuffleWriter<>(
       blockManager,
       shuffleBlockResolver,
       taskMemoryManager,
-      new SerializedShuffleHandle<Object, Object>(0, 1, shuffleDep),
+      new SerializedShuffleHandle<>(0, 1, shuffleDep),
       0, // map id
       taskContext,
       conf

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java 
b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
index 8abc03e..ebb0687 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
@@ -82,10 +82,10 @@ public final class JavaLogQuery {
       String user = m.group(3);
       String query = m.group(5);
       if (!user.equalsIgnoreCase("-")) {
-        return new Tuple3<String, String, String>(ip, user, query);
+        return new Tuple3<>(ip, user, query);
       }
     }
-    return new Tuple3<String, String, String>(null, null, null);
+    return new Tuple3<>(null, null, null);
   }
 
   public static Stats extractStats(String line) {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
index 5904260..bc99dc0 100644
--- 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
+++ 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaMultiLabelClassificationMetricsExample.java
@@ -34,13 +34,13 @@ public class JavaMultiLabelClassificationMetricsExample {
     JavaSparkContext sc = new JavaSparkContext(conf);
     // $example on$
     List<Tuple2<double[], double[]>> data = Arrays.asList(
-      new Tuple2<double[], double[]>(new double[]{0.0, 1.0}, new double[]{0.0, 
2.0}),
-      new Tuple2<double[], double[]>(new double[]{0.0, 2.0}, new double[]{0.0, 
1.0}),
-      new Tuple2<double[], double[]>(new double[]{}, new double[]{0.0}),
-      new Tuple2<double[], double[]>(new double[]{2.0}, new double[]{2.0}),
-      new Tuple2<double[], double[]>(new double[]{2.0, 0.0}, new double[]{2.0, 
0.0}),
-      new Tuple2<double[], double[]>(new double[]{0.0, 1.0, 2.0}, new 
double[]{0.0, 1.0}),
-      new Tuple2<double[], double[]>(new double[]{1.0}, new double[]{1.0, 2.0})
+      new Tuple2<>(new double[]{0.0, 1.0}, new double[]{0.0, 2.0}),
+      new Tuple2<>(new double[]{0.0, 2.0}, new double[]{0.0, 1.0}),
+      new Tuple2<>(new double[]{}, new double[]{0.0}),
+      new Tuple2<>(new double[]{2.0}, new double[]{2.0}),
+      new Tuple2<>(new double[]{2.0, 0.0}, new double[]{2.0, 0.0}),
+      new Tuple2<>(new double[]{0.0, 1.0, 2.0}, new double[]{0.0, 1.0}),
+      new Tuple2<>(new double[]{1.0}, new double[]{1.0, 2.0})
     );
     JavaRDD<Tuple2<double[], double[]>> scoreAndLabels = sc.parallelize(data);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
index b62fa90..91c3bd7 100644
--- 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
+++ 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java
@@ -40,11 +40,11 @@ public class JavaPowerIterationClusteringExample {
     @SuppressWarnings("unchecked")
     // $example on$
     JavaRDD<Tuple3<Long, Long, Double>> similarities = 
sc.parallelize(Lists.newArrayList(
-      new Tuple3<Long, Long, Double>(0L, 1L, 0.9),
-      new Tuple3<Long, Long, Double>(1L, 2L, 0.9),
-      new Tuple3<Long, Long, Double>(2L, 3L, 0.9),
-      new Tuple3<Long, Long, Double>(3L, 4L, 0.1),
-      new Tuple3<Long, Long, Double>(4L, 5L, 0.9)));
+      new Tuple3<>(0L, 1L, 0.9),
+      new Tuple3<>(1L, 2L, 0.9),
+      new Tuple3<>(2L, 3L, 0.9),
+      new Tuple3<>(3L, 4L, 0.1),
+      new Tuple3<>(4L, 5L, 0.9)));
 
     PowerIterationClustering pic = new PowerIterationClustering()
       .setK(2)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
index c27fba2..86c389e 100644
--- 
a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
+++ 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStratifiedSamplingExample.java
@@ -36,7 +36,7 @@ public class JavaStratifiedSamplingExample {
     JavaSparkContext jsc = new JavaSparkContext(conf);
 
     // $example on$
-    List<Tuple2<Integer, Character>> list = new ArrayList<Tuple2<Integer, 
Character>>(
+    List<Tuple2<Integer, Character>> list = new ArrayList<>(
       Arrays.<Tuple2<Integer, Character>>asList(
         new Tuple2(1, 'a'),
         new Tuple2(1, 'b'),

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
----------------------------------------------------------------------
diff --git 
a/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
 
b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
index da56637..bae4b78 100644
--- 
a/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
+++ 
b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
@@ -19,7 +19,6 @@ package org.apache.spark.examples.streaming;
 
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.function.Function;
-import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.*;
 import org.apache.spark.streaming.api.java.*;
 import org.apache.spark.streaming.flume.FlumeUtils;
@@ -58,7 +57,8 @@ public final class JavaFlumeEventCount {
     Duration batchInterval = new Duration(2000);
     SparkConf sparkConf = new SparkConf().setAppName("JavaFlumeEventCount");
     JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, 
batchInterval);
-    JavaReceiverInputDStream<SparkFlumeEvent> flumeStream = 
FlumeUtils.createStream(ssc, host, port);
+    JavaReceiverInputDStream<SparkFlumeEvent> flumeStream =
+      FlumeUtils.createStream(ssc, host, port);
 
     flumeStream.count();
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/external/flume/src/test/java/org/apache/spark/streaming/flume/JavaFlumeStreamSuite.java
----------------------------------------------------------------------
diff --git 
a/external/flume/src/test/java/org/apache/spark/streaming/flume/JavaFlumeStreamSuite.java
 
b/external/flume/src/test/java/org/apache/spark/streaming/flume/JavaFlumeStreamSuite.java
index 3b5e0c7..ada05f2 100644
--- 
a/external/flume/src/test/java/org/apache/spark/streaming/flume/JavaFlumeStreamSuite.java
+++ 
b/external/flume/src/test/java/org/apache/spark/streaming/flume/JavaFlumeStreamSuite.java
@@ -27,10 +27,11 @@ public class JavaFlumeStreamSuite extends 
LocalJavaStreamingContext {
   @Test
   public void testFlumeStream() {
     // tests the API, does not actually test data receiving
-    JavaReceiverInputDStream<SparkFlumeEvent> test1 = 
FlumeUtils.createStream(ssc, "localhost", 12345);
-    JavaReceiverInputDStream<SparkFlumeEvent> test2 = 
FlumeUtils.createStream(ssc, "localhost", 12345,
-      StorageLevel.MEMORY_AND_DISK_SER_2());
-    JavaReceiverInputDStream<SparkFlumeEvent> test3 = 
FlumeUtils.createStream(ssc, "localhost", 12345,
-      StorageLevel.MEMORY_AND_DISK_SER_2(), false);
+    JavaReceiverInputDStream<SparkFlumeEvent> test1 = 
FlumeUtils.createStream(ssc, "localhost",
+      12345);
+    JavaReceiverInputDStream<SparkFlumeEvent> test2 = 
FlumeUtils.createStream(ssc, "localhost",
+      12345, StorageLevel.MEMORY_AND_DISK_SER_2());
+    JavaReceiverInputDStream<SparkFlumeEvent> test3 = 
FlumeUtils.createStream(ssc, "localhost",
+      12345, StorageLevel.MEMORY_AND_DISK_SER_2(), false);
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
----------------------------------------------------------------------
diff --git 
a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java 
b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
index 1e55aad..a08c8dc 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
@@ -34,7 +34,7 @@ class CommandBuilderUtils {
   /** The set of known JVM vendors. */
   enum JavaVendor {
     Oracle, IBM, OpenJDK, Unknown
-  };
+  }
 
   /** Returns whether the given string is null or empty. */
   static boolean isEmpty(String s) {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
----------------------------------------------------------------------
diff --git 
a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java 
b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
index a542159..a083f05 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
@@ -477,6 +477,6 @@ public class SparkLauncher {
       // No op.
     }
 
-  };
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java
----------------------------------------------------------------------
diff --git 
a/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java 
b/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java
index 5bf2bab..a9039b3 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java
@@ -175,7 +175,7 @@ public class LauncherServerSuite extends BaseSuite {
 
     TestClient(Socket s) throws IOException {
       super(s);
-      this.inbound = new LinkedBlockingQueue<Message>();
+      this.inbound = new LinkedBlockingQueue<>();
       this.clientThread = new Thread(this);
       clientThread.setName("TestClient");
       clientThread.setDaemon(true);

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
----------------------------------------------------------------------
diff --git 
a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
 
b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
index b7f4f2e..29cbbe8 100644
--- 
a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
+++ 
b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
@@ -160,7 +160,7 @@ public class SparkSubmitCommandBuilderSuite extends 
BaseSuite {
       "SparkPi",
       "42");
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
     assertEquals("foo", findArgValue(cmd, parser.MASTER));
     assertEquals("bar", findArgValue(cmd, parser.DEPLOY_MODE));

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
index 23c4af1..4525bf7 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
@@ -205,7 +205,7 @@ final class DecisionTreeClassificationModel private[ml] (
   @Since("2.0.0")
   lazy val featureImportances: Vector = 
TreeEnsembleModel.featureImportances(this, numFeatures)
 
-  /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+  /** Convert to spark.mllib DecisionTreeModel (losing some information) */
   override private[spark] def toOld: OldDecisionTreeModel = {
     new OldDecisionTreeModel(rootNode.toOld(1), OldAlgo.Classification)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
index 3ce129b..1d03a5b 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
@@ -62,7 +62,7 @@ private[shared] object SharedParamsCodeGen {
         "every 10 iterations", isValid = "(interval: Int) => interval == -1 || 
interval >= 1"),
       ParamDesc[Boolean]("fitIntercept", "whether to fit an intercept term", 
Some("true")),
       ParamDesc[String]("handleInvalid", "how to handle invalid entries. 
Options are skip (which " +
-        "will filter out rows with bad values), or error (which will throw an 
errror). More " +
+        "will filter out rows with bad values), or error (which will throw an 
error). More " +
         "options may be added later",
         isValid = "ParamValidators.inArray(Array(\"skip\", \"error\"))"),
       ParamDesc[Boolean]("standardization", "whether to standardize the 
training features" +

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala 
b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
index 96263c5..64d6af2 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala
@@ -270,10 +270,10 @@ private[ml] trait HasFitIntercept extends Params {
 private[ml] trait HasHandleInvalid extends Params {
 
   /**
-   * Param for how to handle invalid entries. Options are skip (which will 
filter out rows with bad values), or error (which will throw an errror). More 
options may be added later.
+   * Param for how to handle invalid entries. Options are skip (which will 
filter out rows with bad values), or error (which will throw an error). More 
options may be added later.
    * @group param
    */
-  final val handleInvalid: Param[String] = new Param[String](this, 
"handleInvalid", "how to handle invalid entries. Options are skip (which will 
filter out rows with bad values), or error (which will throw an errror). More 
options may be added later", ParamValidators.inArray(Array("skip", "error")))
+  final val handleInvalid: Param[String] = new Param[String](this, 
"handleInvalid", "how to handle invalid entries. Options are skip (which will 
filter out rows with bad values), or error (which will throw an error). More 
options may be added later", ParamValidators.inArray(Array("skip", "error")))
 
   /** @group getParam */
   final def getHandleInvalid: String = $(handleInvalid)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
index 0a3d00e..1289a31 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
@@ -205,7 +205,7 @@ final class DecisionTreeRegressionModel private[ml] (
   @Since("2.0.0")
   lazy val featureImportances: Vector = 
TreeEnsembleModel.featureImportances(this, numFeatures)
 
-  /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+  /** Convert to spark.mllib DecisionTreeModel (losing some information) */
   override private[spark] def toOld: OldDecisionTreeModel = {
     new OldDecisionTreeModel(rootNode.toOld(1), OldAlgo.Regression)
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
index 1fad9d6..8ea767b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala
@@ -71,7 +71,7 @@ private[spark] trait DecisionTreeModel {
    */
   private[ml] def maxSplitFeatureIndex(): Int = rootNode.maxSplitFeatureIndex()
 
-  /** Convert to spark.mllib DecisionTreeModel (losing some infomation) */
+  /** Convert to spark.mllib DecisionTreeModel (losing some information) */
   private[spark] def toOld: OldDecisionTreeModel
 }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
index c0404be..f10570e 100644
--- 
a/mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/mllib/classification/LogisticRegression.scala
@@ -418,7 +418,7 @@ class LogisticRegressionWithLBFGS
 
   private def run(input: RDD[LabeledPoint], initialWeights: Vector, 
userSuppliedWeights: Boolean):
       LogisticRegressionModel = {
-    // ml's Logisitic regression only supports binary classifcation currently.
+    // ml's Logistic regression only supports binary classification currently.
     if (numOfLinearPredictor == 1) {
       def runWithMlLogisitcRegression(elasticNetParam: Double) = {
         // Prepare the ml LogisticRegression based on our settings

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java 
b/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java
index 6584118..06f7fbb 100644
--- a/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java
+++ b/mllib/src/test/java/org/apache/spark/ml/param/JavaTestParams.java
@@ -89,7 +89,7 @@ public class JavaTestParams extends JavaParams {
     myDoubleParam_ = new DoubleParam(this, "myDoubleParam", "this is a double 
param",
       ParamValidators.inRange(0.0, 1.0));
     List<String> validStrings = Arrays.asList("a", "b");
-    myStringParam_ = new Param<String>(this, "myStringParam", "this is a 
string param",
+    myStringParam_ = new Param<>(this, "myStringParam", "this is a string 
param",
       ParamValidators.inArray(validStrings));
     myDoubleArrayParam_ =
       new DoubleArrayParam(this, "myDoubleArrayParam", "this is a double 
param");

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java
 
b/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java
index c9e5ee2..62c6d9b 100644
--- 
a/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java
+++ 
b/mllib/src/test/java/org/apache/spark/mllib/classification/JavaStreamingLogisticRegressionSuite.java
@@ -66,8 +66,8 @@ public class JavaStreamingLogisticRegressionSuite implements 
Serializable {
     JavaDStream<LabeledPoint> training =
       attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 
2);
     List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
-      new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
-      new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+      new Tuple2<>(10, Vectors.dense(1.0)),
+      new Tuple2<>(11, Vectors.dense(0.0)));
     JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
       attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
     StreamingLogisticRegressionWithSGD slr = new 
StreamingLogisticRegressionWithSGD()

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java
 
b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java
index d644766..62edbd3 100644
--- 
a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java
+++ 
b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaStreamingKMeansSuite.java
@@ -66,8 +66,8 @@ public class JavaStreamingKMeansSuite implements Serializable 
{
     JavaDStream<Vector> training =
       attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 
2);
     List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
-      new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
-      new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+      new Tuple2<>(10, Vectors.dense(1.0)),
+      new Tuple2<>(11, Vectors.dense(0.0)));
     JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
       attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
     StreamingKMeans skmeans = new StreamingKMeans()

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java 
b/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java
index 77c8c62..4ba8e54 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/linalg/JavaVectorsSuite.java
@@ -37,8 +37,8 @@ public class JavaVectorsSuite implements Serializable {
   public void sparseArrayConstruction() {
     @SuppressWarnings("unchecked")
     Vector v = Vectors.sparse(3, Arrays.asList(
-        new Tuple2<Integer, Double>(0, 2.0),
-        new Tuple2<Integer, Double>(2, 3.0)));
+        new Tuple2<>(0, 2.0),
+        new Tuple2<>(2, 3.0)));
     assertArrayEquals(new double[]{2.0, 0.0, 3.0}, v.toArray(), 0.0);
   }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java
 
b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java
index dbf6488..ea0ccd7 100644
--- 
a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java
+++ 
b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaStreamingLinearRegressionSuite.java
@@ -65,8 +65,8 @@ public class JavaStreamingLinearRegressionSuite implements 
Serializable {
     JavaDStream<LabeledPoint> training =
       attachTestInputStream(ssc, Arrays.asList(trainingBatch, trainingBatch), 
2);
     List<Tuple2<Integer, Vector>> testBatch = Arrays.asList(
-      new Tuple2<Integer, Vector>(10, Vectors.dense(1.0)),
-      new Tuple2<Integer, Vector>(11, Vectors.dense(0.0)));
+      new Tuple2<>(10, Vectors.dense(1.0)),
+      new Tuple2<>(11, Vectors.dense(0.0)));
     JavaPairDStream<Integer, Vector> test = JavaPairDStream.fromJavaDStream(
       attachTestInputStream(ssc, Arrays.asList(testBatch, testBatch), 2));
     StreamingLinearRegressionWithSGD slr = new 
StreamingLinearRegressionWithSGD()

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
 
b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
index cccb7f8..eb19d13 100644
--- 
a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
+++ 
b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
@@ -759,7 +759,7 @@ class LinearRegressionSuite
             .sliding(2)
             .forall(x => x(0) >= x(1)))
       } else {
-        // To clalify that the normal solver is used here.
+        // To clarify that the normal solver is used here.
         assert(model.summary.objectiveHistory.length == 1)
         assert(model.summary.objectiveHistory(0) == 0.0)
         val devianceResidualsR = Array(-0.47082, 0.34635)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java
 
b/sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java
index aa7fc21..7784345 100644
--- 
a/sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java
+++ 
b/sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java
@@ -151,7 +151,7 @@ public final class UnsafeExternalRowSorter {
             Platform.throwException(e);
           }
           throw new RuntimeException("Exception should have been re-thrown in 
next()");
-        };
+        }
       };
     } catch (IOException e) {
       cleanupResources();

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala
index d5ac015..2b98aac 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystConf.scala
@@ -26,7 +26,7 @@ private[spark] trait CatalystConf {
   def groupByOrdinal: Boolean
 
   /**
-   * Returns the [[Resolver]] for the current configuration, which can be used 
to determin if two
+   * Returns the [[Resolver]] for the current configuration, which can be used 
to determine if two
    * identifiers are equal.
    */
   def resolver: Resolver = {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index 5f8899d..a24a5db 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -153,8 +153,8 @@ abstract class Expression extends TreeNode[Expression] {
    * evaluate to the same result.
    */
   lazy val canonicalized: Expression = {
-    val canonicalizedChildred = children.map(_.canonicalized)
-    Canonicalize.execute(withNewChildren(canonicalizedChildred))
+    val canonicalizedChildren = children.map(_.canonicalized)
+    Canonicalize.execute(withNewChildren(canonicalizedChildren))
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index b64d3ee..1bebd4e 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -509,7 +509,7 @@ class CodegenContext {
 
   /**
    * Checks and sets up the state and codegen for subexpression elimination. 
This finds the
-   * common subexpresses, generates the functions that evaluate those 
expressions and populates
+   * common subexpressions, generates the functions that evaluate those 
expressions and populates
    * the mapping of common subexpressions to the generated functions.
    */
   private def subexpressionElimination(expressions: Seq[Expression]) = {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
index 103ab36..35a7b46 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala
@@ -222,7 +222,7 @@ object CaseWhen {
   }
 
   /**
-   * A factory method to faciliate the creation of this expression when used 
in parsers.
+   * A factory method to facilitate the creation of this expression when used 
in parsers.
    * @param branches Expressions at even position are the branch conditions, 
and expressions at odd
    *                 position are branch values.
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index 8541b1f..61ea3e4 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -965,7 +965,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with 
Logging {
 
   /**
    * Create a binary arithmetic expression. The following arithmetic operators 
are supported:
-   * - Mulitplication: '*'
+   * - Multiplication: '*'
    * - Division: '/'
    * - Hive Long Division: 'DIV'
    * - Modulo: '%'
@@ -1270,7 +1270,7 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with 
Logging {
   }
 
   /**
-   * Create a double literal for a number denoted in scientifc notation.
+   * Create a double literal for a number denoted in scientific notation.
    */
   override def visitScientificDecimalLiteral(
       ctx: ScientificDecimalLiteralContext): Literal = withOrigin(ctx) {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
index d9577de..c9c9599 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/RowTest.scala
@@ -121,7 +121,7 @@ class RowTest extends FunSpec with Matchers {
       externalRow should be theSameInstanceAs externalRow.copy()
     }
 
-    it("copy should return same ref for interal rows") {
+    it("copy should return same ref for internal rows") {
       internalRow should be theSameInstanceAs internalRow.copy()
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
 
b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
index d3bfb00..8132bba 100644
--- 
a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
+++ 
b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
@@ -272,5 +272,5 @@ public final class UnsafeKVExternalSorter {
     public void close() {
       cleanupResources();
     }
-  };
+  }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
 
b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
index 792e179..d1cc4e6 100644
--- 
a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
+++ 
b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnarBatch.java
@@ -79,7 +79,7 @@ public final class ColumnarBatch {
 
   /**
    * Called to close all the columns in this batch. It is not valid to access 
the data after
-   * calling this. This must be called at the end to clean up memory 
allcoations.
+   * calling this. This must be called at the end to clean up memory 
allocations.
    */
   public void close() {
     for (ColumnVector c: columns) {
@@ -315,7 +315,7 @@ public final class ColumnarBatch {
   public int numRows() { return numRows; }
 
   /**
-   * Returns the number of valid rowss.
+   * Returns the number of valid rows.
    */
   public int numValidRows() {
     assert(numRowsFiltered <= numRows);

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
 
b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
index b1429fe..708a009 100644
--- 
a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
+++ 
b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/OnHeapColumnVector.java
@@ -212,7 +212,7 @@ public final class OnHeapColumnVector extends ColumnVector {
   public void putIntsLittleEndian(int rowId, int count, byte[] src, int 
srcIndex) {
     int srcOffset = srcIndex + Platform.BYTE_ARRAY_OFFSET;
     for (int i = 0; i < count; ++i) {
-      intData[i + rowId] = Platform.getInt(src, srcOffset);;
+      intData[i + rowId] = Platform.getInt(src, srcOffset);
       srcIndex += 4;
       srcOffset += 4;
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/ContinuousQuery.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/ContinuousQuery.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/ContinuousQuery.scala
index 1dc9a68..d9973b0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/ContinuousQuery.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/ContinuousQuery.scala
@@ -94,7 +94,7 @@ trait ContinuousQuery {
   /**
    * Blocks until all available data in the source has been processed an 
committed to the sink.
    * This method is intended for testing. Note that in the case of continually 
arriving data, this
-   * method may block forever.  Additionally, this method is only guranteed to 
block until data that
+   * method may block forever. Additionally, this method is only guaranteed to 
block until data that
    * has been synchronously appended data to a 
[[org.apache.spark.sql.execution.streaming.Source]]
    * prior to invocation. (i.e. `getOffset` must immediately reflect the 
addition).
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 41cb799..a39a211 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -2077,7 +2077,7 @@ class Dataset[T] private[sql](
 
   /**
    * Returns a new [[Dataset]] partitioned by the given partitioning 
expressions into
-   * `numPartitions`. The resulting Datasetis hash partitioned.
+   * `numPartitions`. The resulting Dataset is hash partitioned.
    *
    * This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
    *

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
index 5bcc172..e1fabf5 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
@@ -108,7 +108,7 @@ private[sql] abstract class SparkStrategies extends 
QueryPlanner[SparkPlan] {
     /**
      * Matches a plan whose single partition should be small enough to build a 
hash table.
      *
-     * Note: this assume that the number of partition is fixed, requires 
addtional work if it's
+     * Note: this assume that the number of partition is fixed, requires 
additional work if it's
      * dynamic.
      */
     def canBuildHashMap(plan: LogicalPlan): Boolean = {

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala
index 8060891..8e9214f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Window.scala
@@ -811,7 +811,7 @@ private[execution] final class 
UnboundedPrecedingWindowFunctionFrame(
  *
  * This is a very expensive operator to use, O(n * (n - 1) /2), because we 
need to maintain a
  * buffer and must do full recalculation after each row. Reverse iteration 
would be possible, if
- * the communitativity of the used window functions can be guaranteed.
+ * the commutativity of the used window functions can be guaranteed.
  *
  * @param target to write results to.
  * @param processor to calculate the row values with.

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
index fb1c618..aba500a 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
@@ -146,7 +146,7 @@ case class Filter(condition: Expression, child: SparkPlan)
     // This has the property of not doing redundant IsNotNull checks and 
taking better advantage of
     // short-circuiting, not loading attributes until they are needed.
     // This is very perf sensitive.
-    // TODO: revisit this. We can consider reodering predicates as well.
+    // TODO: revisit this. We can consider reordering predicates as well.
     val generatedIsNotNullChecks = new Array[Boolean](notNullPreds.length)
     val generated = otherPreds.map { c =>
       val nullChecks = c.references.map { r =>

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
index 7e26f19..9a17336 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
@@ -185,7 +185,7 @@ private[columnar] object ColumnBuilder {
       case udt: UserDefinedType[_] =>
         return apply(udt.sqlType, initialSize, columnName, useCompression)
       case other =>
-        throw new Exception(s"not suppported type: $other")
+        throw new Exception(s"not supported type: $other")
     }
 
     builder.initialize(initialSize, columnName, useCompression)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
index e0b6709..d603f63 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
@@ -296,7 +296,7 @@ private[sql] object StatFunctions extends Logging {
     val defaultRelativeError: Double = 0.01
 
     /**
-     * Statisttics from the Greenwald-Khanna paper.
+     * Statistics from the Greenwald-Khanna paper.
      * @param value the sampled value
      * @param g the minimum rank jump from the previous value's minimum rank
      * @param delta the maximum span of the rank.

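For readers who have not seen the Greenwald-Khanna summary, the three fields documented above form one tuple of the sketch. A minimal sketch of that record, with field types assumed (only the names and their meanings come from the doc comment):

    // One GK tuple: the sampled value, the minimum rank jump g from the previous tuple,
    // and the maximum extra span delta. Summing g over the tuples up to this one gives a
    // lower bound on the value's rank; adding delta gives the upper bound.
    case class GKStats(value: Double, g: Long, delta: Long)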
http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
index e819e95..6921ae5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
@@ -32,7 +32,7 @@ object FileStreamSink {
 
 /**
  * A sink that writes out results to parquet files.  Each batch is written out to a unique
- * directory. After all of the files in a batch have been succesfully written, the list of
+ * directory. After all of the files in a batch have been successfully written, the list of
  * file paths is appended to the log atomically. In the case of partial failures, some duplicate
  * data may be present in the target directory, but only one copy of each file will be present
  * in the log.

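Since that paragraph is the sink's whole correctness argument, the ordering it relies on is worth spelling out. This is a hedged sketch of that order only; every name below is hypothetical, and nothing besides the write-then-append sequence is taken from FileStreamSink:

    // Write every file of the batch first, then publish the full list of paths with a single
    // atomic append. A crash between the two steps can leave orphaned files behind, but they
    // are harmless because readers only trust paths that made it into the log.
    def commitBatch(
        writeBatchFiles: () => Seq[String],         // writes the batch, returns the file paths
        appendToLogAtomically: Seq[String] => Unit  // one atomic append to the sink's log
      ): Unit = {
      val paths = writeBatchFiles()
      appendToLogAtomically(paths)
    }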
http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
index 8ece3c9..1e0a4a5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
@@ -178,7 +178,7 @@ private[state] class HDFSBackedStateStoreProvider(
      * This can be called only after committing all the updates made in the current thread.
      */
     override def iterator(): Iterator[(UnsafeRow, UnsafeRow)] = {
-      verify(state == COMMITTED, "Cannot get iterator of store data before comitting")
+      verify(state == COMMITTED, "Cannot get iterator of store data before committing")
       HDFSBackedStateStoreProvider.this.iterator(newVersion)
     }
 

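The string being fixed here states a call-ordering contract: store data may only be iterated after the current thread's updates have been committed. A sketch of that ordering with assumed method names (put, commit and the trait below are not taken from this diff; only the commit-before-iterate rule is):

    // Hypothetical versioned store honouring the contract above: commit() publishes the
    // updates as a new version, and iterator() is only legal after that point.
    trait VersionedStore[K, V] {
      def put(key: K, value: V): Unit
      def commit(): Long                    // returns the newly committed version
      def iterator(): Iterator[(K, V)]      // must not be called before commit()
    }

    def writeThenRead(store: VersionedStore[String, Int]): Iterator[(String, Int)] = {
      store.put("k", 1)
      store.commit()     // committing first is what makes the iterator() call below legal
      store.iterator()
    }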
http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
index d60e618..07f63f9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
@@ -220,7 +220,7 @@ private[state] object StateStore extends Logging {
       val executorId = SparkEnv.get.blockManager.blockManagerId.executorId
       val verified =
         coordinatorRef.map(_.verifyIfInstanceActive(storeId, executorId)).getOrElse(false)
-      logDebug(s"Verifyied whether the loaded instance $storeId is active: $verified" )
+      logDebug(s"Verified whether the loaded instance $storeId is active: $verified" )
       verified
     } catch {
       case NonFatal(e) =>

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index ca2d909..cfe4911 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -126,7 +126,7 @@ object JdbcDialects {
 
   /**
    * Register a dialect for use on all new matching jdbc [[org.apache.spark.sql.DataFrame]].
-   * Readding an existing dialect will cause a move-to-front.
+   * Reading an existing dialect will cause a move-to-front.
    *
    * @param dialect The new dialect.
    */

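Because the hunk above touches the documentation of the registration API, a short usage sketch may help; the dialect itself is made up, while JdbcDialects.registerDialect and the canHandle hook are the public pieces this comment describes:

    import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}

    // A made-up dialect: the one required piece is deciding which JDBC URLs it handles.
    object MyDatabaseDialect extends JdbcDialect {
      override def canHandle(url: String): Boolean = url.startsWith("jdbc:mydb:")
    }

    // Registering a dialect that is already registered moves it to the front of the
    // dialect list, per the comment above.
    def installMyDatabaseDialect(): Unit =
      JdbcDialects.registerDialect(MyDatabaseDialect)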
http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index a5ab446..873f681 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -318,14 +318,14 @@ public class JavaDatasetSuite implements Serializable {
     Encoder<Tuple3<Integer, Long, String>> encoder3 =
       Encoders.tuple(Encoders.INT(), Encoders.LONG(), Encoders.STRING());
     List<Tuple3<Integer, Long, String>> data3 =
-      Arrays.asList(new Tuple3<Integer, Long, String>(1, 2L, "a"));
+      Arrays.asList(new Tuple3<>(1, 2L, "a"));
     Dataset<Tuple3<Integer, Long, String>> ds3 = context.createDataset(data3, encoder3);
     Assert.assertEquals(data3, ds3.collectAsList());
 
     Encoder<Tuple4<Integer, String, Long, String>> encoder4 =
       Encoders.tuple(Encoders.INT(), Encoders.STRING(), Encoders.LONG(), Encoders.STRING());
     List<Tuple4<Integer, String, Long, String>> data4 =
-      Arrays.asList(new Tuple4<Integer, String, Long, String>(1, "b", 2L, "a"));
+      Arrays.asList(new Tuple4<>(1, "b", 2L, "a"));
     Dataset<Tuple4<Integer, String, Long, String>> ds4 = context.createDataset(data4, encoder4);
     Assert.assertEquals(data4, ds4.collectAsList());
 
@@ -333,7 +333,7 @@ public class JavaDatasetSuite implements Serializable {
       Encoders.tuple(Encoders.INT(), Encoders.STRING(), Encoders.LONG(), Encoders.STRING(),
         Encoders.BOOLEAN());
     List<Tuple5<Integer, String, Long, String, Boolean>> data5 =
-      Arrays.asList(new Tuple5<Integer, String, Long, String, Boolean>(1, "b", 2L, "a", true));
+      Arrays.asList(new Tuple5<>(1, "b", 2L, "a", true));
     Dataset<Tuple5<Integer, String, Long, String, Boolean>> ds5 =
       context.createDataset(data5, encoder5);
     Assert.assertEquals(data5, ds5.collectAsList());
@@ -354,7 +354,7 @@ public class JavaDatasetSuite implements Serializable {
       Encoders.tuple(Encoders.INT(),
         Encoders.tuple(Encoders.STRING(), Encoders.STRING(), Encoders.LONG()));
     List<Tuple2<Integer, Tuple3<String, String, Long>>> data2 =
-      Arrays.asList(tuple2(1, new Tuple3<String, String, Long>("a", "b", 3L)));
+      Arrays.asList(tuple2(1, new Tuple3<>("a", "b", 3L)));
     Dataset<Tuple2<Integer, Tuple3<String, String, Long>>> ds2 =
       context.createDataset(data2, encoder2);
     Assert.assertEquals(data2, ds2.collectAsList());
@@ -376,7 +376,7 @@ public class JavaDatasetSuite implements Serializable {
       Encoders.tuple(Encoders.DOUBLE(), Encoders.DECIMAL(), Encoders.DATE(), Encoders.TIMESTAMP(),
         Encoders.FLOAT());
     List<Tuple5<Double, BigDecimal, Date, Timestamp, Float>> data =
-      Arrays.asList(new Tuple5<Double, BigDecimal, Date, Timestamp, Float>(
+      Arrays.asList(new Tuple5<>(
         1.7976931348623157E308, new BigDecimal("0.922337203685477589"),
           Date.valueOf("1970-01-01"), new Timestamp(System.currentTimeMillis()), Float.MAX_VALUE));
     Dataset<Tuple5<Double, BigDecimal, Date, Timestamp, Float>> ds =

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index d160f8a..f7f3bd7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -105,10 +105,10 @@ abstract class QueryTest extends PlanTest {
       val expected = expectedAnswer.toSet.toSeq.map((a: Any) => a.toString).sorted
       val actual = decoded.toSet.toSeq.map((a: Any) => a.toString).sorted
 
-      val comparision = sideBySide("expected" +: expected, "spark" +: actual).mkString("\n")
+      val comparison = sideBySide("expected" +: expected, "spark" +: actual).mkString("\n")
       fail(
         s"""Decoded objects do not match expected objects:
-            |$comparision
+            |$comparison
             |${ds.resolvedTEncoder.deserializer.treeString}
          """.stripMargin)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala
index 3a7cb25..23d4226 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVInferSchemaSuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.datasources.csv
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.sql.types._
 
-class InferSchemaSuite extends SparkFunSuite {
+class CSVInferSchemaSuite extends SparkFunSuite {
 
   test("String fields types are inferred correctly from null types") {
     assert(CSVInferSchema.inferField(NullType, "") == NullType)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index 9746187..a301725 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -469,7 +469,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
     }
   }
 
-  testQuietly("SPARK-9849 DirectParquetOutputCommitter qualified name backwards compatiblity") {
+  testQuietly("SPARK-9849 DirectParquetOutputCommitter qualified name backwards compatibility") {
     val clonedConf = new Configuration(hadoopConfiguration)
 
     // Write to a parquet file and let it fail.

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStressSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStressSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStressSuite.scala
index 3916430..5b49a0a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStressSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStressSuite.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
 /**
- * A stress test for streamign queries that read and write files.  This test constists of
+ * A stress test for streaming queries that read and write files.  This test consists of
  * two threads:
  *  - one that writes out `numRecords` distinct integers to files of random sizes (the total
  *    number of records is fixed but each files size / creation time is random).

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 4afc8d1..9393302 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -380,8 +380,8 @@ class TestHiveContext private[hive](
        """.stripMargin.cmd,
       s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/episodes.avro")}' INTO TABLE episodes".cmd
     ),
-    // THIS TABLE IS NOT THE SAME AS THE HIVE TEST TABLE episodes_partitioned AS DYNAMIC PARITIONING
-    // IS NOT YET SUPPORTED
+    // THIS TABLE IS NOT THE SAME AS THE HIVE TEST TABLE episodes_partitioned AS DYNAMIC
+    // PARTITIONING IS NOT YET SUPPORTED
     TestTable("episodes_part",
       s"""CREATE TABLE episodes_part (title STRING, air_date STRING, doctor INT)
          |PARTITIONED BY (doctor_pt INT)

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 4c1b425..e67fcbe 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -482,7 +482,7 @@ abstract class HiveComparisonTest
                 val tablesGenerated = queryList.zip(executions).flatMap {
                   // We should take executedPlan instead of sparkPlan, because in following codes we
                   // will run the collected plans. As we will do extra processing for sparkPlan such
-                  // as adding exchage, collapsing codegen stages, etc., collecing sparkPlan here
+                  // as adding exchange, collapsing codegen stages, etc., collecting sparkPlan here
                   // will cause some errors when running these plans later.
                   case (q, e) => e.executedPlan.collect {
                     case i: InsertIntoHiveTable if tablesRead contains i.table.tableName =>

http://git-wip-us.apache.org/repos/asf/spark/blob/3f749f7e/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index b6fc61d..eac65d5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -311,7 +311,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
         case ExecutedCommand(_: InsertIntoHadoopFsRelation) => // OK
         case o => fail("test_insert_parquet should be converted to a " +
           s"${classOf[HadoopFsRelation ].getCanonicalName} and " +
-          s"${classOf[InsertIntoDataSource].getCanonicalName} is expcted as the SparkPlan. " +
+          s"${classOf[InsertIntoDataSource].getCanonicalName} is expected as the SparkPlan. " +
           s"However, found a ${o.toString} ")
       }
 
@@ -341,7 +341,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
         case ExecutedCommand(_: InsertIntoHadoopFsRelation) => // OK
         case o => fail("test_insert_parquet should be converted to a " +
           s"${classOf[HadoopFsRelation ].getCanonicalName} and " +
-          s"${classOf[InsertIntoDataSource].getCanonicalName} is expcted as the SparkPlan." +
+          s"${classOf[InsertIntoDataSource].getCanonicalName} is expected as the SparkPlan." +
           s"However, found a ${o.toString} ")
       }
 

