[SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.

## What changes were proposed in this pull request?

To make `docs/examples` (and other related code) simpler, more readable, and 
more user-friendly, this PR replaces existing code like the following with the 
`diamond` operator.

```
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```

Java 7 and higher support the **diamond** operator, which replaces the type 
arguments required to invoke the constructor of a generic class with an empty 
set of type parameters (`<>`). Currently, Spark's Java code uses it inconsistently.

## How was this patch tested?

Manual.
Passes the existing tests.

Author: Dongjoon Hyun <dongj...@apache.org>

Closes #11541 from dongjoon-hyun/SPARK-13702.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/c3689bc2
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/c3689bc2
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/c3689bc2

Branch: refs/heads/master
Commit: c3689bc24e03a9471cd6e8169da61963c4528252
Parents: cbff280
Author: Dongjoon Hyun <dongj...@apache.org>
Authored: Wed Mar 9 10:31:26 2016 +0000
Committer: Sean Owen <so...@cloudera.com>
Committed: Wed Mar 9 10:31:26 2016 +0000

----------------------------------------------------------------------
 .../network/client/TransportClientFactory.java  |  4 +--
 .../shuffle/sort/ShuffleExternalSorter.java     |  4 +--
 .../apache/spark/status/api/v1/TaskSorting.java |  2 +-
 .../spark/launcher/SparkLauncherSuite.java      |  2 +-
 .../shuffle/sort/UnsafeShuffleWriterSuite.java  | 19 +++++-------
 .../map/AbstractBytesToBytesMapSuite.java       |  6 ++--
 .../spark/util/collection/TestTimSort.java      |  2 +-
 .../unsafe/sort/UnsafeExternalSorterSuite.java  |  2 +-
 docs/sql-programming-guide.md                   |  4 +--
 docs/streaming-programming-guide.md             |  4 +--
 .../org/apache/spark/examples/JavaLogQuery.java |  2 +-
 .../org/apache/spark/examples/JavaPageRank.java |  2 +-
 .../org/apache/spark/examples/JavaSparkPi.java  |  2 +-
 .../java/org/apache/spark/examples/JavaTC.java  | 10 +++---
 .../apache/spark/examples/JavaWordCount.java    |  2 +-
 .../ml/JavaElementwiseProductExample.java       |  2 +-
 .../JavaDecisionTreeClassificationExample.java  |  4 +--
 .../JavaDecisionTreeRegressionExample.java      |  4 +--
 ...vaGradientBoostingClassificationExample.java |  4 +--
 .../JavaGradientBoostingRegressionExample.java  |  4 +--
 .../mllib/JavaIsotonicRegressionExample.java    |  2 +-
 .../JavaLinearRegressionWithSGDExample.java     |  2 +-
 .../examples/mllib/JavaNaiveBayesExample.java   |  2 +-
 .../spark/examples/mllib/JavaPCAExample.java    |  2 +-
 .../JavaRandomForestClassificationExample.java  |  4 +--
 .../JavaRandomForestRegressionExample.java      |  4 +--
 .../mllib/JavaRecommendationExample.java        |  6 ++--
 .../spark/examples/mllib/JavaSVDExample.java    |  2 +-
 .../examples/streaming/JavaActorWordCount.java  |  2 +-
 .../examples/streaming/JavaCustomReceiver.java  |  2 +-
 .../streaming/JavaDirectKafkaWordCount.java     |  6 ++--
 .../examples/streaming/JavaKafkaWordCount.java  |  4 +--
 .../streaming/JavaNetworkWordCount.java         |  2 +-
 .../examples/streaming/JavaQueueStream.java     |  4 +--
 .../JavaRecoverableNetworkWordCount.java        |  2 +-
 .../streaming/JavaKinesisWordCountASL.java      |  2 +-
 .../spark/launcher/AbstractCommandBuilder.java  | 16 +++++-----
 .../spark/launcher/CommandBuilderUtils.java     |  2 +-
 .../apache/spark/launcher/LauncherServer.java   |  2 +-
 .../java/org/apache/spark/launcher/Main.java    |  8 ++---
 .../launcher/SparkClassCommandBuilder.java      |  2 +-
 .../apache/spark/launcher/SparkLauncher.java    |  6 ++--
 .../launcher/SparkSubmitCommandBuilder.java     | 12 ++++----
 .../SparkSubmitCommandBuilderSuite.java         | 10 +++---
 .../JavaDecisionTreeClassifierSuite.java        |  2 +-
 .../classification/JavaGBTClassifierSuite.java  |  2 +-
 .../JavaRandomForestClassifierSuite.java        |  2 +-
 .../JavaDecisionTreeRegressorSuite.java         |  2 +-
 .../ml/regression/JavaGBTRegressorSuite.java    |  2 +-
 .../JavaRandomForestRegressorSuite.java         |  2 +-
 .../spark/mllib/clustering/JavaLDASuite.java    |  8 ++---
 .../spark/mllib/tree/JavaDecisionTreeSuite.java |  4 +--
 .../org/apache/spark/sql/types/DataTypes.java   |  2 +-
 .../SpecificParquetRecordReaderBase.java        |  2 +-
 .../apache/spark/sql/JavaApplySchemaSuite.java  |  2 +-
 .../org/apache/spark/sql/JavaDatasetSuite.java  |  2 +-
 .../spark/sql/hive/aggregate/MyDoubleAvg.java   |  4 +--
 .../spark/sql/hive/aggregate/MyDoubleSum.java   |  4 +--
 .../org/apache/spark/sql/hive/test/Complex.java | 32 ++++++++++----------
 59 files changed, 129 insertions(+), 134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
----------------------------------------------------------------------
diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
index 61bafc8..1008c67 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
@@ -194,8 +194,8 @@ public class TransportClientFactory implements Closeable {
       .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
       .option(ChannelOption.ALLOCATOR, pooledAllocator);
 
-    final AtomicReference<TransportClient> clientRef = new AtomicReference<TransportClient>();
-    final AtomicReference<Channel> channelRef = new AtomicReference<Channel>();
+    final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
+    final AtomicReference<Channel> channelRef = new AtomicReference<>();
 
     bootstrap.handler(new ChannelInitializer<SocketChannel>() {
       @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
index f97e76d..7a114df 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
@@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer {
   * this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
    * itself).
    */
-  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<MemoryBlock>();
+  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
 
-  private final LinkedList<SpillInfo> spills = new LinkedList<SpillInfo>();
+  private final LinkedList<SpillInfo> spills = new LinkedList<>();
 
   /** Peak memory used by this sorter so far, in bytes. **/
   private long peakMemoryUsedBytes;

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
index f19ed01..0cf84d5 100644
--- a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
+++ b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
@@ -29,7 +29,7 @@ public enum TaskSorting {
 
   private final Set<String> alternateNames;
   private TaskSorting(String... names) {
-    alternateNames = new HashSet<String>();
+    alternateNames = new HashSet<>();
     for (String n: names) {
       alternateNames.add(n);
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
index 1692df7..3e47bfc 100644
--- a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
+++ b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
@@ -88,7 +88,7 @@ public class SparkLauncherSuite {
   @Test
   public void testChildProcLauncher() throws Exception {
     SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");
 
     SparkLauncher launcher = new SparkLauncher(env)

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index add9d93..ddea6f5 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite {
   File mergedOutputFile;
   File tempDir;
   long[] partitionSizesInMergedFile;
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   SparkConf conf;
   final Serializer serializer = new KryoSerializer(new SparkConf());
   TaskMetrics taskMetrics;
@@ -217,7 +217,7 @@ public class UnsafeShuffleWriterSuite {
   }
 
   private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
-    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<Tuple2<Object, Object>>();
+    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
     long startOffset = 0;
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       final long partitionSize = partitionSizesInMergedFile[i];
@@ -286,8 +286,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeWithoutSpilling() throws Exception {
     // In this example, each partition should have exactly one record:
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -325,8 +324,7 @@ public class UnsafeShuffleWriterSuite {
       conf.set("spark.shuffle.compress", "false");
     }
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -403,7 +401,7 @@ public class UnsafeShuffleWriterSuite {
   public void writeEnoughDataToTriggerSpill() throws Exception {
     memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
     for (int i = 0; i < 10 + 1; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, bigByteArray));
@@ -445,8 +443,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
     new Random(42).nextBytes(bytes);
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(bytes)));
@@ -461,7 +458,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(new byte[1])));
     // We should be able to write a record that's right _at_ the max record size
     final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
@@ -498,7 +495,7 @@ public class UnsafeShuffleWriterSuite {
     taskMemoryManager = spy(taskMemoryManager);
     when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
     final UnsafeShuffleWriter<Object, Object> writer =
-      new UnsafeShuffleWriter<Object, Object>(
+      new UnsafeShuffleWriter<>(
         blockManager,
         shuffleBlockResolver,
         taskMemoryManager,

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
index 61b94b7..9aab226 100644
--- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
+++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
@@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite {
   private TaskMemoryManager taskMemoryManager;
   private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   File tempDir;
 
   @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@@ -397,7 +397,7 @@ public abstract class AbstractBytesToBytesMapSuite {
     final int size = 65536;
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
     try {
       // Fill the map to 90% full so that we can trigger probing
@@ -453,7 +453,7 @@ public abstract class AbstractBytesToBytesMapSuite {
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     try {
       for (int i = 0; i < 1000; i++) {
         final byte[] key = getRandomByteArray(rand.nextInt(128));

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
index 45772b6..e884b1b 100644
--- a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
+++ b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
@@ -76,7 +76,7 @@ public class TestTimSort {
    * @param length The sum of all run lengths that will be added to <code>runs</code>.
    */
   private static List<Long> runsJDKWorstCase(int minRun, int length) {
-    List<Long> runs = new ArrayList<Long>();
+    List<Long> runs = new ArrayList<>();
 
     long runningTotal = 0, Y = minRun + 4, X = minRun;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
index 492fe49..b757ddc 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
@@ -56,7 +56,7 @@ import static org.mockito.Mockito.*;
 
 public class UnsafeExternalSorterSuite {
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   final TestMemoryManager memoryManager =
     new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
   final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/docs/sql-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index c4d277f..89fe873 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -760,7 +760,7 @@ JavaRDD<String> people = sc.textFile("examples/src/main/resources/people.txt");
 String schemaString = "name age";
 
 // Generate the schema based on the string of schema
-List<StructField> fields = new ArrayList<StructField>();
+List<StructField> fields = new ArrayList<>();
 for (String fieldName: schemaString.split(" ")) {
   fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
 }
@@ -1935,7 +1935,7 @@ val jdbcDF = sqlContext.read.format("jdbc").options(
 
 {% highlight java %}
 
-Map<String, String> options = new HashMap<String, String>();
+Map<String, String> options = new HashMap<>();
 options.put("url", "jdbc:postgresql:dbserver");
 options.put("dbtable", "schema.tablename");
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/docs/streaming-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index e92b01a..998644f 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -186,7 +186,7 @@ Next, we want to count these words.
 JavaPairDStream<String, Integer> pairs = words.mapToPair(
   new PairFunction<String, String, Integer>() {
     @Override public Tuple2<String, Integer> call(String s) {
-      return new Tuple2<String, Integer>(s, 1);
+      return new Tuple2<>(s, 1);
     }
   });
 JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
@@ -2095,7 +2095,7 @@ unifiedStream.print()
 <div data-lang="java" markdown="1">
 {% highlight java %}
 int numStreams = 5;
-List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
+List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<>(numStreams);
 for (int i = 0; i < numStreams; i++) {
   kafkaStreams.add(KafkaUtils.createStream(...));
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
index 1a6caa8..8abc03e 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
@@ -108,7 +108,7 @@ public final class JavaLogQuery {
     JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
       @Override
       public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
-        return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
+        return new Tuple2<>(extractKey(s), extractStats(s));
       }
     });
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
index 635fb6a..c3ef93c 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
@@ -88,7 +88,7 @@ public final class JavaPageRank {
       @Override
       public Tuple2<String, String> call(String s) {
         String[] parts = SPACES.split(s);
-        return new Tuple2<String, String>(parts[0], parts[1]);
+        return new Tuple2<>(parts[0], parts[1]);
       }
     }).distinct().groupByKey().cache();
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
index af87488..04a57a6 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
@@ -38,7 +38,7 @@ public final class JavaSparkPi {
 
     int slices = (args.length == 1) ? Integer.parseInt(args[0]) : 2;
     int n = 100000 * slices;
-    List<Integer> l = new ArrayList<Integer>(n);
+    List<Integer> l = new ArrayList<>(n);
     for (int i = 0; i < n; i++) {
       l.add(i);
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/JavaTC.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaTC.java b/examples/src/main/java/org/apache/spark/examples/JavaTC.java
index 2563fcd..ca10384 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaTC.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaTC.java
@@ -41,16 +41,16 @@ public final class JavaTC {
   private static final Random rand = new Random(42);
 
   static List<Tuple2<Integer, Integer>> generateGraph() {
-    Set<Tuple2<Integer, Integer>> edges = new HashSet<Tuple2<Integer, Integer>>(numEdges);
+    Set<Tuple2<Integer, Integer>> edges = new HashSet<>(numEdges);
     while (edges.size() < numEdges) {
       int from = rand.nextInt(numVertices);
       int to = rand.nextInt(numVertices);
-      Tuple2<Integer, Integer> e = new Tuple2<Integer, Integer>(from, to);
+      Tuple2<Integer, Integer> e = new Tuple2<>(from, to);
       if (from != to) {
         edges.add(e);
       }
     }
-    return new ArrayList<Tuple2<Integer, Integer>>(edges);
+    return new ArrayList<>(edges);
   }
 
   static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,
@@ -59,7 +59,7 @@ public final class JavaTC {
 
     @Override
     public Tuple2<Integer, Integer> call(Tuple2<Integer, Tuple2<Integer, Integer>> triple) {
-      return new Tuple2<Integer, Integer>(triple._2()._2(), triple._2()._1());
+      return new Tuple2<>(triple._2()._2(), triple._2()._1());
     }
   }
 
@@ -79,7 +79,7 @@ public final class JavaTC {
       new PairFunction<Tuple2<Integer, Integer>, Integer, Integer>() {
         @Override
         public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> e) {
-          return new Tuple2<Integer, Integer>(e._2(), e._1());
+          return new Tuple2<>(e._2(), e._1());
         }
     });
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
index d746a3d..84dbea5 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
@@ -55,7 +55,7 @@ public final class JavaWordCount {
     JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
       @Override
       public Tuple2<String, Integer> call(String s) {
-        return new Tuple2<String, Integer>(s, 1);
+        return new Tuple2<>(s, 1);
       }
     });
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
index 2898acc..c1f00dd 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
@@ -52,7 +52,7 @@ public class JavaElementwiseProductExample {
       RowFactory.create("b", Vectors.dense(4.0, 5.0, 6.0))
     ));
 
-    List<StructField> fields = new ArrayList<StructField>(2);
+    List<StructField> fields = new ArrayList<>(2);
     fields.add(DataTypes.createStructField("id", DataTypes.StringType, false));
     fields.add(DataTypes.createStructField("vector", new VectorUDT(), false));
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
index 5839b0c..66387b9 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
@@ -54,7 +54,7 @@ class JavaDecisionTreeClassificationExample {
     // Set parameters.
     //  Empty categoricalFeaturesInfo indicates all features are continuous.
     Integer numClasses = 2;
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     String impurity = "gini";
     Integer maxDepth = 5;
     Integer maxBins = 32;
@@ -68,7 +68,7 @@ class JavaDecisionTreeClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
index ccde578..904e7f7 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
@@ -54,7 +54,7 @@ class JavaDecisionTreeRegressionExample {
 
     // Set parameters.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     String impurity = "variance";
     Integer maxDepth = 5;
     Integer maxBins = 32;
@@ -68,7 +68,7 @@ class JavaDecisionTreeRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
       @Override
       public Tuple2<Double, Double> call(LabeledPoint p) {
-        return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+        return new Tuple2<>(model.predict(p.features()), p.label());
       }
     });
     Double testMSE =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
index 0c2e4c9..213949e 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
@@ -58,7 +58,7 @@ public class JavaGradientBoostingClassificationExample {
     boostingStrategy.getTreeStrategy().setNumClasses(2);
     boostingStrategy.getTreeStrategy().setMaxDepth(5);
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
 
     final GradientBoostedTreesModel model =
@@ -69,7 +69,7 @@ public class JavaGradientBoostingClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
index c1bc209..78db442 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
@@ -57,7 +57,7 @@ public class JavaGradientBoostingRegressionExample {
     boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice.
     boostingStrategy.getTreeStrategy().setMaxDepth(5);
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
 
     final GradientBoostedTreesModel model =
@@ -68,7 +68,7 @@ public class JavaGradientBoostingRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testMSE =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
index e632e35..0e15f75 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
@@ -62,7 +62,7 @@ public class JavaIsotonicRegressionExample {
         @Override
        public Tuple2<Double, Double> call(Tuple3<Double, Double, Double> point) {
           Double predictedLabel = model.predict(point._2());
-          return new Tuple2<Double, Double>(predictedLabel, point._1());
+          return new Tuple2<>(predictedLabel, point._1());
         }
       }
     );

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
index 3e50118..9ca9a78 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
@@ -70,7 +70,7 @@ public class JavaLinearRegressionWithSGDExample {
       new Function<LabeledPoint, Tuple2<Double, Double>>() {
         public Tuple2<Double, Double> call(LabeledPoint point) {
           double prediction = model.predict(point.features());
-          return new Tuple2<Double, Double>(prediction, point.label());
+          return new Tuple2<>(prediction, point.label());
         }
       }
     );

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
index 478e615..2b17dbb 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
@@ -46,7 +46,7 @@ public class JavaNaiveBayesExample {
       test.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     double accuracy = predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>() {

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
index faf76a9..a42c29f 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
@@ -42,7 +42,7 @@ public class JavaPCAExample {
 
     // $example on$
     double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
-    LinkedList<Vector> rowsList = new LinkedList<Vector>();
+    LinkedList<Vector> rowsList = new LinkedList<>();
     for (int i = 0; i < array.length; i++) {
       Vector currentRow = Vectors.dense(array[i]);
       rowsList.add(currentRow);

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
index f4c9d8a..24af5d0 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
@@ -50,7 +50,7 @@ public class JavaRandomForestClassificationExample {
     // Train a RandomForest model.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
     Integer numClasses = 2;
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     Integer numTrees = 3; // Use more in practice.
     String featureSubsetStrategy = "auto"; // Let the algorithm choose.
     String impurity = "gini";
@@ -67,7 +67,7 @@ public class JavaRandomForestClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
index c71125c..afa9045 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
@@ -51,7 +51,7 @@ public class JavaRandomForestRegressionExample {
 
     // Set parameters.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     Integer numTrees = 3; // Use more in practice.
     String featureSubsetStrategy = "auto"; // Let the algorithm choose.
     String impurity = "variance";
@@ -67,7 +67,7 @@ public class JavaRandomForestRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testMSE =

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
index 5e64342..f69aa4b 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
@@ -64,8 +64,7 @@ public class JavaRecommendationExample {
       model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map(
         new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
           public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
-            return new Tuple2<Tuple2<Integer, Integer>, Double>(
-              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+            return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
           }
         }
       ));
@@ -73,8 +72,7 @@ public class JavaRecommendationExample {
       JavaPairRDD.fromJavaRDD(ratings.map(
         new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
           public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
-            return new Tuple2<Tuple2<Integer, Integer>, Double>(
-              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+            return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
           }
         }
       )).join(predictions).values();

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
index b417da8..3730e60 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
@@ -44,7 +44,7 @@ public class JavaSVDExample {
 
     // $example on$
     double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
-    LinkedList<Vector> rowsList = new LinkedList<Vector>();
+    LinkedList<Vector> rowsList = new LinkedList<>();
     for (int i = 0; i < array.length; i++) {
       Vector currentRow = Vectors.dense(array[i]);
       rowsList.add(currentRow);

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
index 7bb70d0..7884b8c 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
@@ -129,7 +129,7 @@ public class JavaActorWordCount {
     }).mapToPair(new PairFunction<String, String, Integer>() {
       @Override
       public Tuple2<String, Integer> call(String s) {
-        return new Tuple2<String, Integer>(s, 1);
+        return new Tuple2<>(s, 1);
       }
     }).reduceByKey(new Function2<Integer, Integer, Integer>() {
       @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 3d668ad..5de5634 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -82,7 +82,7 @@ public class JavaCustomReceiver extends Receiver<String> {
     JavaPairDStream<String, Integer> wordCounts = words.mapToPair(
       new PairFunction<String, String, Integer>() {
         @Override public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
index 5107500..bfbad91 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
@@ -63,8 +63,8 @@ public final class JavaDirectKafkaWordCount {
     SparkConf sparkConf = new SparkConf().setAppName("JavaDirectKafkaWordCount");
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2));
 
-    HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(topics.split(",")));
-    HashMap<String, String> kafkaParams = new HashMap<String, String>();
+    HashSet<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
+    HashMap<String, String> kafkaParams = new HashMap<>();
     kafkaParams.put("metadata.broker.list", brokers);
 
     // Create direct kafka stream with brokers and topics
@@ -95,7 +95,7 @@ public final class JavaDirectKafkaWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(
         new Function2<Integer, Integer, Integer>() {

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
index 0df4cb4..655da68 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
@@ -69,7 +69,7 @@ public final class JavaKafkaWordCount {
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));
 
     int numThreads = Integer.parseInt(args[3]);
-    Map<String, Integer> topicMap = new HashMap<String, Integer>();
+    Map<String, Integer> topicMap = new HashMap<>();
     String[] topics = args[2].split(",");
     for (String topic: topics) {
       topicMap.put(topic, numThreads);
@@ -96,7 +96,7 @@ public final class JavaKafkaWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b82b319..5761da6 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -76,7 +76,7 @@ public final class JavaNetworkWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
index 4ce8437..426eaa5 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
@@ -50,7 +50,7 @@ public final class JavaQueueStream {
 
     // Create the queue through which RDDs can be pushed to
     // a QueueInputDStream
-    Queue<JavaRDD<Integer>> rddQueue = new LinkedList<JavaRDD<Integer>>();
+    Queue<JavaRDD<Integer>> rddQueue = new LinkedList<>();
 
     // Create and push some RDDs into the queue
     List<Integer> list = Lists.newArrayList();
@@ -68,7 +68,7 @@ public final class JavaQueueStream {
         new PairFunction<Integer, Integer, Integer>() {
           @Override
           public Tuple2<Integer, Integer> call(Integer i) {
-            return new Tuple2<Integer, Integer>(i % 10, 1);
+            return new Tuple2<>(i % 10, 1);
           }
         });
     JavaPairDStream<Integer, Integer> reducedStream = mappedStream.reduceByKey(

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index f9929fc..a597ecb 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -142,7 +142,7 @@ public final class JavaRecoverableNetworkWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
----------------------------------------------------------------------
diff --git a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index 64e044a..5dc825d 100644
--- a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -136,7 +136,7 @@ public final class JavaKinesisWordCountASL { // needs to be public for access fr
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
 
     // Create the Kinesis DStreams
-    List<JavaDStream<byte[]>> streamsList = new ArrayList<JavaDStream<byte[]>>(numStreams);
+    List<JavaDStream<byte[]>> streamsList = new ArrayList<>(numStreams);
     for (int i = 0; i < numStreams; i++) {
       streamsList.add(
           KinesisUtils.createStream(jssc, kinesisAppName, streamName, endpointUrl, regionName,

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
index c7ab513..4641032 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
@@ -58,12 +58,12 @@ abstract class AbstractCommandBuilder {
   private Map<String, String> effectiveConfig;
 
   public AbstractCommandBuilder() {
-    this.appArgs = new ArrayList<String>();
-    this.childEnv = new HashMap<String, String>();
-    this.conf = new HashMap<String, String>();
-    this.files = new ArrayList<String>();
-    this.jars = new ArrayList<String>();
-    this.pyFiles = new ArrayList<String>();
+    this.appArgs = new ArrayList<>();
+    this.childEnv = new HashMap<>();
+    this.conf = new HashMap<>();
+    this.files = new ArrayList<>();
+    this.jars = new ArrayList<>();
+    this.pyFiles = new ArrayList<>();
   }
 
   /**
@@ -87,7 +87,7 @@ abstract class AbstractCommandBuilder {
    * class.
    */
   List<String> buildJavaCommand(String extraClassPath) throws IOException {
-    List<String> cmd = new ArrayList<String>();
+    List<String> cmd = new ArrayList<>();
     String envJavaHome;
 
     if (javaHome != null) {
@@ -134,7 +134,7 @@ abstract class AbstractCommandBuilder {
   List<String> buildClassPath(String appClassPath) throws IOException {
     String sparkHome = getSparkHome();
 
-    List<String> cp = new ArrayList<String>();
+    List<String> cp = new ArrayList<>();
     addToClassPath(cp, getenv("SPARK_CLASSPATH"));
     addToClassPath(cp, appClassPath);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
index e328c8a..7942d73 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
@@ -147,7 +147,7 @@ class CommandBuilderUtils {
    * Output: [ "ab cd", "efgh", "i \" j" ]
    */
   static List<String> parseOptionString(String s) {
-    List<String> opts = new ArrayList<String>();
+    List<String> opts = new ArrayList<>();
     StringBuilder opt = new StringBuilder();
     boolean inOpt = false;
     boolean inSingleQuote = false;

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
index 414ffc2..69fbf43 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
@@ -129,7 +129,7 @@ class LauncherServer implements Closeable {
       server.setReuseAddress(true);
       server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
 
-      this.clients = new ArrayList<ServerConnection>();
+      this.clients = new ArrayList<>();
       this.threadIds = new AtomicLong();
       this.factory = new NamedThreadFactory(THREAD_NAME_FMT);
       this.pending = new ConcurrentHashMap<>();
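
The same hunk shows the mixed starting point this patch evens out: `pending` was already constructed with the diamond while `clients` was not. As a simplified sketch (made-up field types, not the real LauncherServer):

```
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Simplified sketch: the diamond applies uniformly to any generic class,
// concurrent collections included.
class ServerState {
  final List<String> clients = new ArrayList<>();
  final Map<Long, String> pending = new ConcurrentHashMap<>();
}
```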

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/Main.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/Main.java b/launcher/src/main/java/org/apache/spark/launcher/Main.java
index e751e94..1e34bb8 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/Main.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/Main.java
@@ -50,7 +50,7 @@ class Main {
   public static void main(String[] argsArray) throws Exception {
    checkArgument(argsArray.length > 0, "Not enough arguments: missing class name.");
 
-    List<String> args = new ArrayList<String>(Arrays.asList(argsArray));
+    List<String> args = new ArrayList<>(Arrays.asList(argsArray));
     String className = args.remove(0);
 
    boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND"));
@@ -70,7 +70,7 @@ class Main {
           // Ignore parsing exceptions.
         }
 
-        List<String> help = new ArrayList<String>();
+        List<String> help = new ArrayList<>();
         if (parser.className != null) {
           help.add(parser.CLASS);
           help.add(parser.className);
@@ -82,7 +82,7 @@ class Main {
       builder = new SparkClassCommandBuilder(className, args);
     }
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = builder.buildCommand(env);
     if (printLaunchCommand) {
       System.err.println("Spark Command: " + join(" ", cmd));
@@ -130,7 +130,7 @@ class Main {
       return cmd;
     }
 
-    List<String> newCmd = new ArrayList<String>();
+    List<String> newCmd = new ArrayList<>();
     newCmd.add("env");
 
     for (Map.Entry<String, String> e : childEnv.entrySet()) {
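
The first change in this file shows that the diamond is not limited to no-arg constructors: `new ArrayList<>(Arrays.asList(argsArray))` infers `String` just as well. A runnable sketch of the same idiom (argument values made up):

```
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DiamondCopyDemo {
  public static void main(String[] ignored) {
    // The element type is inferred from the target type; the ArrayList copy
    // is mutable, unlike the fixed-size view Arrays.asList returns.
    List<String> args = new ArrayList<>(Arrays.asList("MyClass", "--verbose"));
    String className = args.remove(0);
    System.out.println(className + " / " + args);
  }
}
```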

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
index e575fd3..4018723 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
@@ -44,7 +44,7 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder {
 
   @Override
  public List<String> buildCommand(Map<String, String> env) throws IOException {
-    List<String> javaOptsKeys = new ArrayList<String>();
+    List<String> javaOptsKeys = new ArrayList<>();
     String memKey = null;
     String extraClassPath = null;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
index 20e6003..a542159 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
@@ -75,7 +75,7 @@ public class SparkLauncher {
   /** Used internally to create unique logger names. */
   private static final AtomicInteger COUNTER = new AtomicInteger();
 
-  static final Map<String, String> launcherConfig = new HashMap<String, String>();
+  static final Map<String, String> launcherConfig = new HashMap<>();
 
   /**
   * Set a configuration value for the launcher library. These config values do not affect the
@@ -428,7 +428,7 @@ public class SparkLauncher {
   }
 
   private ProcessBuilder createBuilder() {
-    List<String> cmd = new ArrayList<String>();
+    List<String> cmd = new ArrayList<>();
     String script = isWindows() ? "spark-submit.cmd" : "spark-submit";
     cmd.add(join(File.separator, builder.getSparkHome(), "bin", script));
     cmd.addAll(builder.buildSparkSubmitArgs());
@@ -437,7 +437,7 @@ public class SparkLauncher {
    // preserved, otherwise the batch interpreter will mess up the arguments. Batch scripts are
     // weird.
     if (isWindows()) {
-      List<String> winCmd = new ArrayList<String>();
+      List<String> winCmd = new ArrayList<>();
       for (String arg : cmd) {
         winCmd.add(quoteForBatchScript(arg));
       }
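
The `launcherConfig` change is the static-field variant of the same cleanup; inference in a static initializer works exactly as it does for instance fields. A sketch (the config key below is made up):

```
import java.util.HashMap;
import java.util.Map;

class LauncherConfigDemo {
  // Inferred as HashMap<String, String> from the field declaration.
  static final Map<String, String> launcherConfig = new HashMap<>();

  static {
    launcherConfig.put("example.config.key", "example-value");  // hypothetical key
  }
}
```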

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
index 269c89c..b2dd6ac 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
@@ -67,7 +67,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
   * command line parsing works. This maps the class name to the resource to use when calling
    * spark-submit.
    */
-  private static final Map<String, String> specialClasses = new HashMap<String, String>();
+  private static final Map<String, String> specialClasses = new HashMap<>();
   static {
     specialClasses.put("org.apache.spark.repl.Main", "spark-shell");
     
specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver",
@@ -87,12 +87,12 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
   private boolean allowsMixedArguments;
 
   SparkSubmitCommandBuilder() {
-    this.sparkArgs = new ArrayList<String>();
+    this.sparkArgs = new ArrayList<>();
     this.printInfo = false;
   }
 
   SparkSubmitCommandBuilder(List<String> args) {
-    this.sparkArgs = new ArrayList<String>();
+    this.sparkArgs = new ArrayList<>();
     List<String> submitArgs = args;
     if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) {
       this.allowsMixedArguments = true;
@@ -123,7 +123,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
   }
 
   List<String> buildSparkSubmitArgs() {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<>();
     SparkSubmitOptionParser parser = new SparkSubmitOptionParser();
 
     if (verbose) {
@@ -244,7 +244,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
 
    // The executable is the PYSPARK_DRIVER_PYTHON env variable set by the pyspark script,
     // followed by PYSPARK_DRIVER_PYTHON_OPTS.
-    List<String> pyargs = new ArrayList<String>();
+    List<String> pyargs = new ArrayList<>();
    pyargs.add(firstNonEmpty(System.getenv("PYSPARK_DRIVER_PYTHON"), "python"));
     String pyOpts = System.getenv("PYSPARK_DRIVER_PYTHON_OPTS");
     if (!isEmpty(pyOpts)) {
@@ -270,7 +270,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
     env.put("R_PROFILE_USER",
            join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R"));
 
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<>();
     args.add(firstNonEmpty(System.getenv("SPARKR_DRIVER_R"), "R"));
     return args;
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
----------------------------------------------------------------------
diff --git a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
index 6aad47a..d367318 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
@@ -73,7 +73,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "spark.randomOption=foo",
       parser.CONF,
       SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH + "=/driverLibPath");
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);

    assertTrue(findInStringList(env.get(CommandBuilderUtils.getLibPathEnvName()),
@@ -125,7 +125,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "--master=foo",
       "--deploy-mode=bar");
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
     assertEquals("python", cmd.get(cmd.size() - 1));
     assertEquals(
@@ -142,7 +142,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "script.py",
       "arg1");
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
 
     assertEquals("foo", findArgValue(cmd, "--master"));
@@ -178,7 +178,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
           + "/launcher/src/test/resources");
     }
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = launcher.buildCommand(env);
 
     // Checks below are different for driver and non-driver mode.
@@ -258,7 +258,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
   }
 
  private Map<String, String> parseConf(List<String> cmd, SparkSubmitOptionParser parser) {
-    Map<String, String> conf = new HashMap<String, String>();
+    Map<String, String> conf = new HashMap<>();
     for (int i = 0; i < cmd.size(); i++) {
       if (cmd.get(i).equals(parser.CONF)) {
         String[] val = cmd.get(i + 1).split("=", 2);

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
index 60f25e5..40b9c35 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
index 3c69467..59b6fba 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
index a66a1e1..5485fcb 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
index ebe800e..d5c9d12 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
index fc8c13d..38d15dc 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     GBTRegressor rf = new GBTRegressor()

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
index a00ce5e..31be888 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
      LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
index 225a216..db19b30 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
@@ -45,9 +45,9 @@ public class JavaLDASuite implements Serializable {
   @Before
   public void setUp() {
     sc = new JavaSparkContext("local", "JavaLDA");
-    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<Tuple2<Long, Vector>>();
+    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
     for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
-      tinyCorpus.add(new Tuple2<Long, Vector>((Long)LDASuite.tinyCorpus()[i]._1(),
+      tinyCorpus.add(new Tuple2<>((Long)LDASuite.tinyCorpus()[i]._1(),
           LDASuite.tinyCorpus()[i]._2()));
     }
     JavaRDD<Tuple2<Long, Vector>> tmpCorpus = sc.parallelize(tinyCorpus, 2);
@@ -189,8 +189,8 @@ public class JavaLDASuite implements Serializable {
     double logPerplexity = toyModel.logPerplexity(pairedDocs);
 
     // check: logLikelihood.
-    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<Tuple2<Long, Vector>>();
-    docsSingleWord.add(new Tuple2<Long, Vector>(0L, Vectors.dense(1.0, 0.0, 0.0)));
+    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
+    docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
     JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord));
     double logLikelihood = toyModel.logLikelihood(single);
   }
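
The `new Tuple2<>(...)` changes above use the diamond inside a method argument, where javac infers the type arguments from the constructor's actual arguments. A self-contained sketch using a JDK pair type in place of Scala's `Tuple2`, so it compiles without Spark on the classpath:

```
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class DiamondArgumentDemo {
  public static void main(String[] args) {
    List<Map.Entry<Long, String>> docs = new ArrayList<>();
    // Inferred as SimpleEntry<Long, String> from the constructor arguments.
    docs.add(new SimpleEntry<>(0L, "single word"));
    System.out.println(docs.get(0).getKey() + " -> " + docs.get(0).getValue());
  }
}
```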

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
----------------------------------------------------------------------
diff --git a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
index 9925aae..8dd2906 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
@@ -64,7 +64,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingConstructor() {
    List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
    JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;
@@ -84,7 +84,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingStaticMethods() {
    List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
    JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
index 17659d7..24adead 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
@@ -201,7 +201,7 @@ public class DataTypes {
     if (fields == null) {
       throw new IllegalArgumentException("fields should not be null.");
     }
-    Set<String> distinctNames = new HashSet<String>();
+    Set<String> distinctNames = new HashSet<>();
     for (StructField field : fields) {
       if (field == null) {
         throw new IllegalArgumentException(

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
index 6bcd155..5c257bc 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
@@ -149,7 +149,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
    * by MapReduce.
    */
   public static List<String> listDirectory(File path) throws IOException {
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<>();
     if (path.isDirectory()) {
       for (File f: path.listFiles()) {
         result.addAll(listDirectory(f));

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
index 640efcc..51f987f 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
@@ -111,7 +111,7 @@ public class JavaApplySchemaSuite implements Serializable {
     df.registerTempTable("people");
     Row[] actual = sqlContext.sql("SELECT * FROM people").collect();
 
-    List<Row> expected = new ArrayList<Row>(2);
+    List<Row> expected = new ArrayList<>(2);
     expected.add(RowFactory.create("Michael", 29));
     expected.add(RowFactory.create("Yin", 28));
 

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 9b624f3..b054b10 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -67,7 +67,7 @@ public class JavaDatasetSuite implements Serializable {
   }
 
   private <T1, T2> Tuple2<T1, T2> tuple2(T1 t1, T2 t2) {
-    return new Tuple2<T1, T2>(t1, t2);
+    return new Tuple2<>(t1, t2);
   }
 
   @Test
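
The `tuple2` helper above is the generic-method case: inside `<T1, T2> tuple2(...)` the diamond resolves to the method's own type variables, so no explicit type witness is needed at the construction site. Sketched with a JDK pair type instead of `Tuple2`:

```
import java.util.AbstractMap.SimpleEntry;
import java.util.Map;

class PairFactory {
  // Inferred as SimpleEntry<K, V> from the method's type variables.
  static <K, V> Map.Entry<K, V> pair(K k, V v) {
    return new SimpleEntry<>(k, v);
  }

  public static void main(String[] args) {
    Map.Entry<String, Integer> e = pair("answer", 42);
    System.out.println(e.getKey() + "=" + e.getValue());
  }
}
```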

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
index 5a167ed..ae0c097 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
@@ -42,14 +42,14 @@ public class MyDoubleAvg extends UserDefinedAggregateFunction {
   private DataType _returnDataType;
 
   public MyDoubleAvg() {
-    List<StructField> inputFields = new ArrayList<StructField>();
+    List<StructField> inputFields = new ArrayList<>();
    inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
     _inputDataType = DataTypes.createStructType(inputFields);
 
     // The buffer has two values, bufferSum for storing the current sum and
    // bufferCount for storing the number of non-null input values that have been contributed
     // to the current sum.
-    List<StructField> bufferFields = new ArrayList<StructField>();
+    List<StructField> bufferFields = new ArrayList<>();
     bufferFields.add(DataTypes.createStructField("bufferSum", 
DataTypes.DoubleType, true));
     bufferFields.add(DataTypes.createStructField("bufferCount", 
DataTypes.LongType, true));
     _bufferSchema = DataTypes.createStructType(bufferFields);

http://git-wip-us.apache.org/repos/asf/spark/blob/c3689bc2/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
index c3b7768..d17fb3e 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
@@ -41,11 +41,11 @@ public class MyDoubleSum extends UserDefinedAggregateFunction {
   private DataType _returnDataType;
 
   public MyDoubleSum() {
-    List<StructField> inputFields = new ArrayList<StructField>();
+    List<StructField> inputFields = new ArrayList<>();
    inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
     _inputDataType = DataTypes.createStructType(inputFields);
 
-    List<StructField> bufferFields = new ArrayList<StructField>();
+    List<StructField> bufferFields = new ArrayList<>();
     bufferFields.add(DataTypes.createStructField("bufferDouble", 
DataTypes.DoubleType, true));
     _bufferSchema = DataTypes.createStructType(bufferFields);
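
One caveat when applying this cleanup more widely: on Java 7 and 8 the diamond cannot be combined with anonymous inner classes, so call sites of that shape must keep their explicit type arguments (Java 9 later relaxed this). For example:

```
import java.util.Comparator;

class DiamondLimitDemo {
  // Fine on Java 7/8: explicit type argument with an anonymous class.
  static final Comparator<String> BY_LENGTH = new Comparator<String>() {
    @Override
    public int compare(String a, String b) {
      return Integer.compare(a.length(), b.length());
    }
  };
  // new Comparator<>() { ... } is rejected by javac before Java 9:
  // "cannot use '<>' with anonymous inner classes".
}
```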
 

