This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 8535df7  [MINOR] Fix typos in comments and replace an explicit type with <>
8535df7 is described below

commit 8535df72614800ba789286e569a39ea6e84b3354
Author: younggyu chun <younggyuc...@gmail.com>
AuthorDate: Sat Aug 10 16:47:11 2019 -0500

    [MINOR] Fix typos in comments and replace an explicit type with <>
    
    ## What changes were proposed in this pull request?
    This PR fixes typos in comments and replaces explicit type arguments with '<>' for Java 8+.
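    
    For illustration only, a minimal standalone sketch (hypothetical class, not code from the files touched here): with Java 8 target typing the compiler can infer the generic type argument even when the diamond appears inside another method call, so spelling out the type parameter on the right-hand side is redundant.
    
        import java.util.Collections;
        import java.util.HashSet;
        import java.util.Set;
    
        public class DiamondExample {
          public static void main(String[] args) {
            // Before: the element type is written out on both sides.
            Set<String> explicit = Collections.synchronizedSet(new HashSet<String>());
    
            // After: <> lets the compiler infer String from the declared target type.
            Set<String> inferred = Collections.synchronizedSet(new HashSet<>());
    
            explicit.add("a");
            inferred.add("b");
            System.out.println(explicit + " " + inferred);
          }
        }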
    
    ## How was this patch tested?
    Manually tested.
    
    Closes #25338 from younggyuchun/younggyu.
    
    Authored-by: younggyu chun <younggyuc...@gmail.com>
    Signed-off-by: Sean Owen <sean.o...@databricks.com>
---
 appveyor.yml                                                      | 2 +-
 .../java/org/apache/spark/network/ChunkFetchIntegrationSuite.java | 6 +++---
 .../test/java/org/apache/spark/network/RpcIntegrationSuite.java   | 8 ++++----
 .../org/apache/spark/network/TransportClientFactorySuite.java     | 2 +-
 .../spark/network/shuffle/ExternalShuffleIntegrationSuite.java    | 6 +++---
 .../test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java  | 2 +-
 .../org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java   | 4 ++--
 .../org/apache/spark/metrics/source/AccumulatorSourceSuite.scala  | 2 +-
 .../examples/sql/streaming/JavaStructuredSessionization.java      | 2 +-
 .../spark/streaming/kafka010/JavaConsumerStrategySuite.java       | 2 +-
 .../spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java      | 2 +-
 .../java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java  | 2 +-
 .../test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java   | 2 +-
 .../apache/spark/mllib/regression/JavaRidgeRegressionSuite.java   | 4 ++--
 .../apache/spark/sql/execution/python/WindowInPandasExec.scala    | 2 +-
 .../test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java | 2 +-
 .../java/org/apache/spark/streaming/JavaMapWithStateSuite.java    | 4 ++--
 17 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/appveyor.yml b/appveyor.yml
index 8fb090c..b0e946c 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -55,7 +55,7 @@ build_script:
 
 environment:
   NOT_CRAN: true
-  # See SPARK-27848. Currently installing some dependent packagess causes
+  # See SPARK-27848. Currently installing some dependent packages causes
   # "(converted from warning) unable to identify current timezone 'C':" for an unknown reason.
   # This environment variable works around to test SparkR against a higher version.
   R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
diff --git a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
index 5999b62..a818fe4 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
@@ -151,9 +151,9 @@ public class ChunkFetchIntegrationSuite {
       clientFactory.createClient(TestUtils.getLocalHost(), server.getPort())) {
       final Semaphore sem = new Semaphore(0);
 
-      res.successChunks = Collections.synchronizedSet(new HashSet<Integer>());
-      res.failedChunks = Collections.synchronizedSet(new HashSet<Integer>());
-      res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
+      res.successChunks = Collections.synchronizedSet(new HashSet<>());
+      res.failedChunks = Collections.synchronizedSet(new HashSet<>());
+      res.buffers = Collections.synchronizedList(new LinkedList<>());
 
       ChunkReceivedCallback callback = new ChunkReceivedCallback() {
         @Override
diff --git a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
index 117f1e4..498dc51 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
@@ -175,8 +175,8 @@ public class RpcIntegrationSuite {
     final Semaphore sem = new Semaphore(0);
 
     final RpcResult res = new RpcResult();
-    res.successMessages = Collections.synchronizedSet(new HashSet<String>());
-    res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
+    res.successMessages = Collections.synchronizedSet(new HashSet<>());
+    res.errorMessages = Collections.synchronizedSet(new HashSet<>());
 
     RpcResponseCallback callback = new RpcResponseCallback() {
       @Override
@@ -208,8 +208,8 @@ public class RpcIntegrationSuite {
     TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
     final Semaphore sem = new Semaphore(0);
     RpcResult res = new RpcResult();
-    res.successMessages = Collections.synchronizedSet(new HashSet<String>());
-    res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
+    res.successMessages = Collections.synchronizedSet(new HashSet<>());
+    res.errorMessages = Collections.synchronizedSet(new HashSet<>());
 
     for (String stream : streams) {
       int idx = stream.lastIndexOf('/');
diff --git a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
index b4caa87..2aec4a3 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
@@ -84,7 +84,7 @@ public class TransportClientFactorySuite {
     try (TransportContext context = new TransportContext(conf, rpcHandler)) {
       TransportClientFactory factory = context.createClientFactory();
       Set<TransportClient> clients = Collections.synchronizedSet(
-          new HashSet<TransportClient>());
+              new HashSet<>());
 
       AtomicInteger failed = new AtomicInteger();
       Thread[] attempts = new Thread[maxConnections * 10];
diff --git a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
index 61a58e9..9d398e3 100644
--- a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
+++ b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
@@ -170,9 +170,9 @@ public class ExternalShuffleIntegrationSuite {
       TransportConf clientConf,
       int port) throws Exception {
     final FetchResult res = new FetchResult();
-    res.successBlocks = Collections.synchronizedSet(new HashSet<String>());
-    res.failedBlocks = Collections.synchronizedSet(new HashSet<String>());
-    res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
+    res.successBlocks = Collections.synchronizedSet(new HashSet<>());
+    res.failedBlocks = Collections.synchronizedSet(new HashSet<>());
+    res.buffers = Collections.synchronizedList(new LinkedList<>());
 
     final Semaphore requestsRemaining = new Semaphore(0);
 
diff --git a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
index 0d441fb..cd253c0 100644
--- a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
+++ b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
@@ -467,7 +467,7 @@ public class UTF8StringSuite {
       )));
     assertEquals(
       fromString("translate"),
-      fromString("translate").translate(new HashMap<Character, Character>()));
+      fromString("translate").translate(new HashMap<>()));
     assertEquals(
       fromString("asae"),
       fromString("translate").translate(ImmutableMap.of(
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index 88125a6..6b83a98 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -533,7 +533,7 @@ public class UnsafeShuffleWriterSuite {
     long newPeakMemory;
     try {
       for (int i = 0; i < numRecordsPerPage * 10; i++) {
-        writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
+        writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
         newPeakMemory = writer.getPeakMemoryUsedBytes();
         if (i % numRecordsPerPage == 0) {
           // The first page is allocated in constructor, another page will be allocated after
@@ -550,7 +550,7 @@ public class UnsafeShuffleWriterSuite {
       newPeakMemory = writer.getPeakMemoryUsedBytes();
       assertEquals(previousPeakMemory, newPeakMemory);
       for (int i = 0; i < numRecordsPerPage; i++) {
-        writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
+        writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
       }
       newPeakMemory = writer.getPeakMemoryUsedBytes();
       assertEquals(previousPeakMemory, newPeakMemory);
diff --git a/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala b/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
index 45e6e0b..4262d26 100644
--- a/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
@@ -66,7 +66,7 @@ class AccumulatorSourceSuite extends SparkFunSuite {
     assert(gauges.get("my-accumulator-2").getValue() == 456)
   }
 
-  test("the double accumulators value propety is checked when the gauge's value is requested") {
+  test("the double accumulators value property is checked when the gauge's value is requested") {
     val acc1 = new DoubleAccumulator()
     acc1.add(123.123)
     val acc2 = new DoubleAccumulator()
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
index 943e3d8..34ee235 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
@@ -70,7 +70,7 @@ public final class JavaStructuredSessionization {
       new FlatMapFunction<LineWithTimestamp, Event>() {
         @Override
         public Iterator<Event> call(LineWithTimestamp lineWithTimestamp) {
-          ArrayList<Event> eventList = new ArrayList<Event>();
+          ArrayList<Event> eventList = new ArrayList<>();
           for (String word : lineWithTimestamp.getLine().split(" ")) {
             eventList.add(new Event(word, lineWithTimestamp.getTimestamp()));
           }
diff --git a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
index 938cc8d..dc364ac 100644
--- a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
+++ b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
@@ -42,7 +42,7 @@ public class JavaConsumerStrategySuite implements Serializable {
     final Collection<TopicPartition> parts = Arrays.asList(tp1, tp2);
     final scala.collection.Iterable<TopicPartition> sParts =
       JavaConverters.collectionAsScalaIterableConverter(parts).asScala();
-    final Map<String, Object> kafkaParams = new HashMap<String, Object>();
+    final Map<String, Object> kafkaParams = new HashMap<>();
     kafkaParams.put("bootstrap.servers", "not used");
     final scala.collection.Map<String, Object> sKafkaParams =
       JavaConverters.mapAsScalaMapConverter(kafkaParams).asScala();
diff --git a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
index dc9c13b..2b8b185 100644
--- a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
+++ b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
@@ -152,7 +152,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
 
     JavaDStream<String> unifiedStream = stream1.union(stream2);
 
-    final Set<String> result = Collections.synchronizedSet(new HashSet<String>());
+    final Set<String> result = Collections.synchronizedSet(new HashSet<>());
     unifiedStream.foreachRDD(new VoidFunction<JavaRDD<String>>() {
           @Override
           public void call(JavaRDD<String> rdd) {
diff --git a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
index 9795041..22d9324 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
@@ -37,7 +37,7 @@ public class CommandBuilderUtilsSuite {
     testOpt(" a b c \\\\ ", Arrays.asList("a", "b", "c", "\\"));
 
     // Following tests ported from UtilsSuite.scala.
-    testOpt("", new ArrayList<String>());
+    testOpt("", new ArrayList<>());
     testOpt("a", Arrays.asList("a"));
     testOpt("aaa", Arrays.asList("aaa"));
     testOpt("a b c", Arrays.asList("a", "b", "c"));
diff --git a/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java b/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
index 38ab39a..163d213 100644
--- a/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
@@ -40,7 +40,7 @@ public class JavaSummarizerSuite extends SharedSparkSession {
   @Override
   public void setUp() throws IOException {
     super.setUp();
-    List<LabeledPoint> points = new ArrayList<LabeledPoint>();
+    List<LabeledPoint> points = new ArrayList<>();
     points.add(new LabeledPoint(0.0, Vectors.dense(1.0, 2.0)));
     points.add(new LabeledPoint(0.0, Vectors.dense(3.0, 4.0)));
 
diff --git a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
index fb6c775..5a9389c 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
@@ -57,7 +57,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
     List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
 
     JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
-      new ArrayList<LabeledPoint>(data.subList(0, numExamples)));
+            new ArrayList<>(data.subList(0, numExamples)));
     List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
 
     RidgeRegressionWithSGD ridgeSGDImpl = new RidgeRegressionWithSGD();
@@ -82,7 +82,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
     List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
 
     JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
-      new ArrayList<LabeledPoint>(data.subList(0, numExamples)));
+            new ArrayList<>(data.subList(0, numExamples)));
     List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
 
     RidgeRegressionModel model = RidgeRegressionWithSGD.train(testRDD.rdd(), 200, 1.0, 0.0);
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
index 01ce07b..cad89de 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
@@ -251,7 +251,7 @@ case class WindowInPandasExec(
     }
 
     // Setting the window bounds argOffset for each UDF. For UDFs with bounded window, argOffset
-    // for the UDF is (lowerBoundOffet, upperBoundOffset, inputOffset1, inputOffset2, ...)
+    // for the UDF is (lowerBoundOffset, upperBoundOffset, inputOffset1, inputOffset2, ...)
     // For UDFs with unbounded window, argOffset is (inputOffset1, inputOffset2, ...)
     pyFuncs.indices.foreach { exprIndex =>
       val frameIndex = expressionIndexToFrameIndex(exprIndex)
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
index 7babf75..d54be46 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
@@ -62,7 +62,7 @@ public class JavaDataFrameReaderWriterSuite {
 
   @Test
   public void testOptionsAPI() {
-    HashMap<String, String> map = new HashMap<String, String>();
+    HashMap<String, String> map = new HashMap<>();
     map.put("e", "1");
     spark
         .read()
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
index b1367b8..2e00713 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
@@ -149,10 +149,10 @@ public class JavaMapWithStateSuite extends LocalJavaStreamingContext implements
       inputStream.map(x -> new Tuple2<>(x, 1))).mapWithState(mapWithStateSpec);
 
     List<Set<T>> collectedOutputs =
-        Collections.synchronizedList(new ArrayList<Set<T>>());
+        Collections.synchronizedList(new ArrayList<>());
     mapWithStateDStream.foreachRDD(rdd -> collectedOutputs.add(Sets.newHashSet(rdd.collect())));
     List<Set<Tuple2<K, S>>> collectedStateSnapshots =
-        Collections.synchronizedList(new ArrayList<Set<Tuple2<K, S>>>());
+        Collections.synchronizedList(new ArrayList<>());
     mapWithStateDStream.stateSnapshots().foreachRDD(rdd ->
         collectedStateSnapshots.add(Sets.newHashSet(rdd.collect())));
     BatchCounter batchCounter = new BatchCounter(ssc.ssc());


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
