Repository: spark
Updated Branches:
  refs/heads/branch-2.3 6152da389 -> db27a9365


[MINOR][BUILD] Fix Java linter errors

## What changes were proposed in this pull request?

This PR cleans up the java-lint errors (for v2.3.0-rc1 tag). Hopefully, this 
will be the final one.

```
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks failed at following occurrences:
[ERROR] src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java:[85] (sizes) LineLength: Line is longer than 100 characters (found 101).
[ERROR] src/main/java/org/apache/spark/launcher/InProcessAppHandle.java:[20,8] (imports) UnusedImports: Unused import - java.io.IOException.
[ERROR] src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java:[41,9] (modifier) ModifierOrder: 'private' modifier out of order with the JLS suggestions.
[ERROR] src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java:[464] (sizes) LineLength: Line is longer than 100 characters (found 102).
```

## How was this patch tested?

Manual.

```
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

Author: Dongjoon Hyun <dongj...@apache.org>

Closes #20242 from dongjoon-hyun/fix_lint_java_2.3_rc1.

(cherry picked from commit 7bd14cfd40500a0b6462cda647bdbb686a430328)
Signed-off-by: Sameer Agarwal <samee...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/db27a936
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/db27a936
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/db27a936

Branch: refs/heads/branch-2.3
Commit: db27a93652780f234f3c5fe750ef07bc5525d177
Parents: 6152da3
Author: Dongjoon Hyun <dongj...@apache.org>
Authored: Fri Jan 12 10:18:42 2018 -0800
Committer: Sameer Agarwal <samee...@apache.org>
Committed: Fri Jan 12 10:18:59 2018 -0800

----------------------------------------------------------------------
 .../java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java  | 3 ++-
 .../main/java/org/apache/spark/launcher/InProcessAppHandle.java   | 1 -
 .../spark/sql/execution/datasources/orc/OrcColumnVector.java      | 2 +-
 .../test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java   | 3 ++-
 4 files changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/db27a936/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java
----------------------------------------------------------------------
diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java
index 3acfe36..a9603c1 100644
--- a/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java
+++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/memory/HeapMemoryAllocator.java
@@ -82,7 +82,8 @@ public class HeapMemoryAllocator implements MemoryAllocator {
       "page has already been freed";
     assert ((memory.pageNumber == MemoryBlock.NO_PAGE_NUMBER)
             || (memory.pageNumber == MemoryBlock.FREED_IN_TMM_PAGE_NUMBER)) :
-      "TMM-allocated pages must first be freed via TMM.freePage(), not directly in allocator free()";
+      "TMM-allocated pages must first be freed via TMM.freePage(), not directly in allocator " +
+        "free()";
 
     final long size = memory.size();
     if (MemoryAllocator.MEMORY_DEBUG_FILL_ENABLED) {

http://git-wip-us.apache.org/repos/asf/spark/blob/db27a936/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java
----------------------------------------------------------------------
diff --git a/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java b/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java
index 0d6a73a..acd64c9 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java
@@ -17,7 +17,6 @@
 
 package org.apache.spark.launcher;
 
-import java.io.IOException;
 import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.logging.Level;

http://git-wip-us.apache.org/repos/asf/spark/blob/db27a936/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
----------------------------------------------------------------------
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
index f94c55d..b6e7922 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java
@@ -38,7 +38,7 @@ public class OrcColumnVector extends org.apache.spark.sql.vectorized.ColumnVecto
   private BytesColumnVector bytesData;
   private DecimalColumnVector decimalData;
   private TimestampColumnVector timestampData;
-  final private boolean isTimestamp;
+  private final boolean isTimestamp;
 
   private int batchSize;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/db27a936/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
----------------------------------------------------------------------
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
index 4f8a31f..69a2904 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
@@ -461,7 +461,8 @@ public class JavaDataFrameSuite {
   public void testUDF() {
    UserDefinedFunction foo = udf((Integer i, String s) -> i.toString() + s, DataTypes.StringType);
    Dataset<Row> df = spark.table("testData").select(foo.apply(col("key"), col("value")));
-    String[] result = df.collectAsList().stream().map(row -> row.getString(0)).toArray(String[]::new);
+    String[] result = df.collectAsList().stream().map(row -> row.getString(0))
+      .toArray(String[]::new);
    String[] expected = spark.table("testData").collectAsList().stream()
      .map(row -> row.get(0).toString() + row.getString(1)).toArray(String[]::new);
     Assert.assertArrayEquals(expected, result);


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to