[2/2] spark git commit: Preparing development version 1.6.0-SNAPSHOT

2015-12-02 Thread pwendell
Preparing development version 1.6.0-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5d915fed
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5d915fed
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5d915fed

Branch: refs/heads/branch-1.6
Commit: 5d915fed300b47a51b7614d28bd8ea7795b4e841
Parents: bf52584
Author: Patrick Wendell 
Authored: Wed Dec 2 09:54:15 2015 -0800
Committer: Patrick Wendell 
Committed: Wed Dec 2 09:54:15 2015 -0800

--
 assembly/pom.xml| 2 +-
 bagel/pom.xml   | 2 +-
 core/pom.xml| 2 +-
 docker-integration-tests/pom.xml| 2 +-
 examples/pom.xml| 2 +-
 external/flume-assembly/pom.xml | 2 +-
 external/flume-sink/pom.xml | 2 +-
 external/flume/pom.xml  | 2 +-
 external/kafka-assembly/pom.xml | 2 +-
 external/kafka/pom.xml  | 2 +-
 external/mqtt-assembly/pom.xml  | 2 +-
 external/mqtt/pom.xml   | 2 +-
 external/twitter/pom.xml| 2 +-
 external/zeromq/pom.xml | 2 +-
 extras/java8-tests/pom.xml  | 2 +-
 extras/kinesis-asl-assembly/pom.xml | 2 +-
 extras/kinesis-asl/pom.xml  | 2 +-
 extras/spark-ganglia-lgpl/pom.xml   | 2 +-
 graphx/pom.xml  | 2 +-
 launcher/pom.xml| 2 +-
 mllib/pom.xml   | 2 +-
 network/common/pom.xml  | 2 +-
 network/shuffle/pom.xml | 2 +-
 network/yarn/pom.xml| 2 +-
 pom.xml | 2 +-
 repl/pom.xml| 2 +-
 sql/catalyst/pom.xml| 2 +-
 sql/core/pom.xml| 2 +-
 sql/hive-thriftserver/pom.xml   | 2 +-
 sql/hive/pom.xml| 2 +-
 streaming/pom.xml   | 2 +-
 tags/pom.xml| 2 +-
 tools/pom.xml   | 2 +-
 unsafe/pom.xml  | 2 +-
 yarn/pom.xml| 2 +-
 35 files changed, 35 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/assembly/pom.xml
--
diff --git a/assembly/pom.xml b/assembly/pom.xml
index fbabaa5..4b60ee0 100644
--- a/assembly/pom.xml
+++ b/assembly/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/bagel/pom.xml
--
diff --git a/bagel/pom.xml b/bagel/pom.xml
index 1b3e417..672e946 100644
--- a/bagel/pom.xml
+++ b/bagel/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/core/pom.xml
--
diff --git a/core/pom.xml b/core/pom.xml
index 15b8d75..61744bb 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/docker-integration-tests/pom.xml
--
diff --git a/docker-integration-tests/pom.xml b/docker-integration-tests/pom.xml
index ee9de91..dee0c4a 100644
--- a/docker-integration-tests/pom.xml
+++ b/docker-integration-tests/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/examples/pom.xml
--
diff --git a/examples/pom.xml b/examples/pom.xml
index 37b15bb..f5ab2a7 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/external/flume-assembly/pom.xml
--
diff --git a/external/flume-assembly/pom.xml b/external/flume-assembly/pom.xml
index 295455a..dceedcf 100644
--- a/external/flume-assembly/pom.xml
+++ b/external/flume-assembly/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0</version>
+    <version>1.6.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/5d915fed/external/flume-sink/pom.xml
--
diff --git a/external/flume-sink/pom.xml 

[1/2] spark git commit: Preparing Spark release v1.6.0-rc1

2015-12-02 Thread pwendell
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 f449a407f -> 5d915fed3


Preparing Spark release v1.6.0-rc1


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/bf525845
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/bf525845
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/bf525845

Branch: refs/heads/branch-1.6
Commit: bf525845cef159d2d4c9f4d64e158f037179b5c4
Parents: f449a40
Author: Patrick Wendell 
Authored: Wed Dec 2 09:54:10 2015 -0800
Committer: Patrick Wendell 
Committed: Wed Dec 2 09:54:10 2015 -0800

--
 assembly/pom.xml| 2 +-
 bagel/pom.xml   | 2 +-
 core/pom.xml| 2 +-
 docker-integration-tests/pom.xml| 2 +-
 examples/pom.xml| 2 +-
 external/flume-assembly/pom.xml | 2 +-
 external/flume-sink/pom.xml | 2 +-
 external/flume/pom.xml  | 2 +-
 external/kafka-assembly/pom.xml | 2 +-
 external/kafka/pom.xml  | 2 +-
 external/mqtt-assembly/pom.xml  | 2 +-
 external/mqtt/pom.xml   | 2 +-
 external/twitter/pom.xml| 2 +-
 external/zeromq/pom.xml | 2 +-
 extras/java8-tests/pom.xml  | 2 +-
 extras/kinesis-asl-assembly/pom.xml | 2 +-
 extras/kinesis-asl/pom.xml  | 2 +-
 extras/spark-ganglia-lgpl/pom.xml   | 2 +-
 graphx/pom.xml  | 2 +-
 launcher/pom.xml| 2 +-
 mllib/pom.xml   | 2 +-
 network/common/pom.xml  | 2 +-
 network/shuffle/pom.xml | 2 +-
 network/yarn/pom.xml| 2 +-
 pom.xml | 2 +-
 repl/pom.xml| 2 +-
 sql/catalyst/pom.xml| 2 +-
 sql/core/pom.xml| 2 +-
 sql/hive-thriftserver/pom.xml   | 2 +-
 sql/hive/pom.xml| 2 +-
 streaming/pom.xml   | 2 +-
 tags/pom.xml| 2 +-
 tools/pom.xml   | 2 +-
 unsafe/pom.xml  | 2 +-
 yarn/pom.xml| 2 +-
 35 files changed, 35 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/assembly/pom.xml
--
diff --git a/assembly/pom.xml b/assembly/pom.xml
index 4b60ee0..fbabaa5 100644
--- a/assembly/pom.xml
+++ b/assembly/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/bagel/pom.xml
--
diff --git a/bagel/pom.xml b/bagel/pom.xml
index 672e946..1b3e417 100644
--- a/bagel/pom.xml
+++ b/bagel/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/core/pom.xml
--
diff --git a/core/pom.xml b/core/pom.xml
index 61744bb..15b8d75 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/docker-integration-tests/pom.xml
--
diff --git a/docker-integration-tests/pom.xml b/docker-integration-tests/pom.xml
index dee0c4a..ee9de91 100644
--- a/docker-integration-tests/pom.xml
+++ b/docker-integration-tests/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/examples/pom.xml
--
diff --git a/examples/pom.xml b/examples/pom.xml
index f5ab2a7..37b15bb 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/external/flume-assembly/pom.xml
--
diff --git a/external/flume-assembly/pom.xml b/external/flume-assembly/pom.xml
index dceedcf..295455a 100644
--- a/external/flume-assembly/pom.xml
+++ b/external/flume-assembly/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.10</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
+    <version>1.6.0</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/spark/blob/bf525845/external/flume-sink/pom.xml

Git Push Summary

2015-12-02 Thread pwendell
Repository: spark
Updated Tags:  refs/tags/v1.6.0-rc1 [created] bf525845c




spark git commit: [SPARK-12094][SQL] Prettier tree string for TreeNode

2015-12-02 Thread yhuai
Repository: spark
Updated Branches:
  refs/heads/master 128c29035 -> a1542ce2f


[SPARK-12094][SQL] Prettier tree string for TreeNode

When examining plans of complex queries with multiple joins, a pain point of
mine is that it's hard to immediately see the sibling of a specific query plan
node. This PR adds tree lines to the tree string of a `TreeNode`, so that the
result is visually more intuitive.
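
For context, here is a minimal, self-contained sketch of the drawing scheme (hypothetical names, not the Spark class): each node's prefix is derived from whether each of its ancestors is the last child of its parent, using ":  " or blank padding for the vertical rails and ":- " or "+- " for the branch to the node itself.

```scala
object TreeStringSketch {
  case class Node(name: String, children: Seq[Node] = Nil)

  def generateTreeString(
      node: Node,
      depth: Int,
      lastChildren: Seq[Boolean],
      builder: StringBuilder): StringBuilder = {
    if (depth > 0) {
      // Ancestors that were last children leave blank padding; others keep a ':' rail.
      lastChildren.init.foreach { isLast =>
        builder.append(if (isLast) "   " else ":  ")
      }
      builder.append(if (lastChildren.last) "+- " else ":- ")
    }
    builder.append(node.name).append("\n")
    if (node.children.nonEmpty) {
      node.children.init.foreach(c => generateTreeString(c, depth + 1, lastChildren :+ false, builder))
      generateTreeString(node.children.last, depth + 1, lastChildren :+ true, builder)
    }
    builder
  }

  def main(args: Array[String]): Unit = {
    val plan = Node("Join", Seq(Node("Filter", Seq(Node("ScanA"))), Node("ScanB")))
    print(generateTreeString(plan, 0, Nil, new StringBuilder))
    // Join
    // :- Filter
    // :  +- ScanA
    // +- ScanB
  }
}
```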

Author: Cheng Lian 

Closes #10099 from liancheng/prettier-tree-string.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/a1542ce2
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/a1542ce2
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/a1542ce2

Branch: refs/heads/master
Commit: a1542ce2f33ad365ff437d2d3014b9de2f6670e5
Parents: 128c290
Author: Cheng Lian 
Authored: Wed Dec 2 09:36:12 2015 -0800
Committer: Yin Huai 
Committed: Wed Dec 2 09:36:12 2015 -0800

--
 .../spark/sql/catalyst/trees/TreeNode.scala | 31 
 1 file changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/a1542ce2/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
--
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index ad2bd78..dfea583 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -393,7 +393,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
   override def toString: String = treeString
 
   /** Returns a string representation of the nodes in this tree */
-  def treeString: String = generateTreeString(0, new StringBuilder).toString
+  def treeString: String = generateTreeString(0, Nil, new StringBuilder).toString
 
   /**
    * Returns a string representation of the nodes in this tree, where each operator is numbered.
@@ -419,12 +419,33 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
     }
   }
 
-  /** Appends the string represent of this node and its children to the given StringBuilder. */
-  protected def generateTreeString(depth: Int, builder: StringBuilder): StringBuilder = {
-    builder.append(" " * depth)
+  /**
+   * Appends the string represent of this node and its children to the given StringBuilder.
+   *
+   * The `i`-th element in `lastChildren` indicates whether the ancestor of the current node at
+   * depth `i + 1` is the last child of its own parent node.  The depth of the root node is 0, and
+   * `lastChildren` for the root node should be empty.
+   */
+  protected def generateTreeString(
+      depth: Int, lastChildren: Seq[Boolean], builder: StringBuilder): StringBuilder = {
+    if (depth > 0) {
+      lastChildren.init.foreach { isLast =>
+        val prefixFragment = if (isLast) "   " else ":  "
+        builder.append(prefixFragment)
+      }
+
+      val branch = if (lastChildren.last) "+- " else ":- "
+      builder.append(branch)
+    }
+
     builder.append(simpleString)
     builder.append("\n")
-    children.foreach(_.generateTreeString(depth + 1, builder))
+
+    if (children.nonEmpty) {
+      children.init.foreach(_.generateTreeString(depth + 1, lastChildren :+ false, builder))
+      children.last.generateTreeString(depth + 1, lastChildren :+ true, builder)
+    }
+
     builder
   }
 





spark git commit: [SPARK-12094][SQL] Prettier tree string for TreeNode

2015-12-02 Thread yhuai
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 d79dd971d -> f449a407f


[SPARK-12094][SQL] Prettier tree string for TreeNode

When examining plans of complex queries with multiple joins, a pain point of
mine is that it's hard to immediately see the sibling of a specific query plan
node. This PR adds tree lines to the tree string of a `TreeNode`, so that the
result is visually more intuitive.

Author: Cheng Lian 

Closes #10099 from liancheng/prettier-tree-string.

(cherry picked from commit a1542ce2f33ad365ff437d2d3014b9de2f6670e5)
Signed-off-by: Yin Huai 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f449a407
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f449a407
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f449a407

Branch: refs/heads/branch-1.6
Commit: f449a407f6f152c676524d4348bbe34d4d3fbfca
Parents: d79dd97
Author: Cheng Lian 
Authored: Wed Dec 2 09:36:12 2015 -0800
Committer: Yin Huai 
Committed: Wed Dec 2 09:36:20 2015 -0800

--
 .../spark/sql/catalyst/trees/TreeNode.scala | 31 
 1 file changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/f449a407/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
--
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index ad2bd78..dfea583 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -393,7 +393,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
   override def toString: String = treeString
 
   /** Returns a string representation of the nodes in this tree */
-  def treeString: String = generateTreeString(0, new StringBuilder).toString
+  def treeString: String = generateTreeString(0, Nil, new StringBuilder).toString
 
   /**
    * Returns a string representation of the nodes in this tree, where each operator is numbered.
@@ -419,12 +419,33 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
     }
   }
 
-  /** Appends the string represent of this node and its children to the given StringBuilder. */
-  protected def generateTreeString(depth: Int, builder: StringBuilder): StringBuilder = {
-    builder.append(" " * depth)
+  /**
+   * Appends the string represent of this node and its children to the given StringBuilder.
+   *
+   * The `i`-th element in `lastChildren` indicates whether the ancestor of the current node at
+   * depth `i + 1` is the last child of its own parent node.  The depth of the root node is 0, and
+   * `lastChildren` for the root node should be empty.
+   */
+  protected def generateTreeString(
+      depth: Int, lastChildren: Seq[Boolean], builder: StringBuilder): StringBuilder = {
+    if (depth > 0) {
+      lastChildren.init.foreach { isLast =>
+        val prefixFragment = if (isLast) "   " else ":  "
+        builder.append(prefixFragment)
+      }
+
+      val branch = if (lastChildren.last) "+- " else ":- "
+      builder.append(branch)
+    }
+
     builder.append(simpleString)
     builder.append("\n")
-    children.foreach(_.generateTreeString(depth + 1, builder))
+
+    if (children.nonEmpty) {
+      children.init.foreach(_.generateTreeString(depth + 1, lastChildren :+ false, builder))
+      children.last.generateTreeString(depth + 1, lastChildren :+ true, builder)
+    }
+
     builder
   }
 





spark git commit: [SPARK-12001] Allow partially-stopped StreamingContext to be completely stopped

2015-12-02 Thread joshrosen
Repository: spark
Updated Branches:
  refs/heads/master a1542ce2f -> 452690ba1


[SPARK-12001] Allow partially-stopped StreamingContext to be completely stopped

If `StreamingContext.stop()` is interrupted midway through the call, the
context will be marked as stopped but certain state will not have been cleaned
up. Because `state = STOPPED` will already be set, subsequent `stop()` calls
will be unable to finish stopping the context, preventing any new
StreamingContexts from being created.

This patch addresses this issue by only marking the context as `STOPPED` once 
the `stop()` has successfully completed which allows `stop()` to be called a 
second time in order to finish stopping the context in case the original 
`stop()` call was interrupted.

I discovered this issue by examining logs from a failed Jenkins run in which 
this race condition occurred in `FailureSuite`, leaking an unstoppable context 
and causing all subsequent tests to fail.
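
A minimal sketch of the pattern the patch adopts (hypothetical names, not Spark code): the context is only marked STOPPED once teardown has fully completed, so a `stop()` that is interrupted partway can be retried, which in turn requires every teardown step to be idempotent.

```scala
object StopPatternSketch {
  sealed trait State
  case object Initialized extends State
  case object Active extends State
  case object Stopped extends State

  class Context(teardown: () => Unit) {
    private var state: State = Active

    def stop(): Unit = synchronized {
      state match {
        case Initialized =>
          state = Stopped   // nothing was started, safe to mark immediately
        case Stopped =>
          ()                // already fully stopped, nothing left to do
        case Active =>
          teardown()        // may throw; state stays Active so stop() can be retried
          state = Stopped   // reached only after teardown succeeded
      }
    }
  }

  def main(args: Array[String]): Unit = {
    var attempts = 0
    val ctx = new Context(() => {
      attempts += 1
      if (attempts == 1) throw new RuntimeException("interrupted mid-stop")
    })
    try ctx.stop() catch { case _: RuntimeException => () }  // first call fails partway
    ctx.stop()                                               // retry completes the stop
  }
}
```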

Author: Josh Rosen 

Closes #9982 from JoshRosen/SPARK-12001.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/452690ba
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/452690ba
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/452690ba

Branch: refs/heads/master
Commit: 452690ba1cc3c667bdd9f3022c43c9a10267880b
Parents: a1542ce
Author: Josh Rosen 
Authored: Wed Dec 2 13:44:01 2015 -0800
Committer: Josh Rosen 
Committed: Wed Dec 2 13:44:01 2015 -0800

--
 .../spark/streaming/StreamingContext.scala  | 49 +++-
 1 file changed, 27 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/452690ba/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
--
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
index 6fb8ad3..cf843e3 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
@@ -699,28 +699,33 @@ class StreamingContext private[streaming] (
         " AsynchronousListenerBus")
     }
     synchronized {
-      try {
-        state match {
-          case INITIALIZED =>
-            logWarning("StreamingContext has not been started yet")
-          case STOPPED =>
-            logWarning("StreamingContext has already been stopped")
-          case ACTIVE =>
-            scheduler.stop(stopGracefully)
-            // Removing the streamingSource to de-register the metrics on stop()
-            env.metricsSystem.removeSource(streamingSource)
-            uiTab.foreach(_.detach())
-            StreamingContext.setActiveContext(null)
-            waiter.notifyStop()
-            if (shutdownHookRef != null) {
-              shutdownHookRefToRemove = shutdownHookRef
-              shutdownHookRef = null
-            }
-            logInfo("StreamingContext stopped successfully")
-        }
-      } finally {
-        // The state should always be Stopped after calling `stop()`, even if we haven't started yet
-        state = STOPPED
+      // The state should always be Stopped after calling `stop()`, even if we haven't started yet
+      state match {
+        case INITIALIZED =>
+          logWarning("StreamingContext has not been started yet")
+          state = STOPPED
+        case STOPPED =>
+          logWarning("StreamingContext has already been stopped")
+          state = STOPPED
+        case ACTIVE =>
+          // It's important that we don't set state = STOPPED until the very end of this case,
+          // since we need to ensure that we're still able to call `stop()` to recover from
+          // a partially-stopped StreamingContext which resulted from this `stop()` call being
+          // interrupted. See SPARK-12001 for more details. Because the body of this case can be
+          // executed twice in the case of a partial stop, all methods called here need to be
+          // idempotent.
+          scheduler.stop(stopGracefully)
+          // Removing the streamingSource to de-register the metrics on stop()
+          env.metricsSystem.removeSource(streamingSource)
+          uiTab.foreach(_.detach())
+          StreamingContext.setActiveContext(null)
+          waiter.notifyStop()
+          if (shutdownHookRef != null) {
+            shutdownHookRefToRemove = shutdownHookRef
+            shutdownHookRef = null
+          }
+          logInfo("StreamingContext stopped successfully")
+          state = STOPPED
       }
     }
     if (shutdownHookRefToRemove != null) {



spark git commit: [SPARK-10266][DOCUMENTATION, ML] Fixed @Since annotation for ml.tunning

2015-12-02 Thread meng
Repository: spark
Updated Branches:
  refs/heads/master 452690ba1 -> de07d06ab


[SPARK-10266][DOCUMENTATION, ML] Fixed @Since annotation for ml.tunning

cc mengxr noel-smith

I worked on this issue based on https://github.com/apache/spark/pull/8729.
ehsanmok, thank you for your contribution!

Author: Yu ISHIKAWA 
Author: Ehsan M.Kermani 

Closes #9338 from yu-iskw/JIRA-10266.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/de07d06a
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/de07d06a
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/de07d06a

Branch: refs/heads/master
Commit: de07d06abecf3516c95d099b6c01a86e0c8cfd8c
Parents: 452690b
Author: Yu ISHIKAWA 
Authored: Wed Dec 2 14:15:54 2015 -0800
Committer: Xiangrui Meng 
Committed: Wed Dec 2 14:15:54 2015 -0800

--
 .../apache/spark/ml/tuning/CrossValidator.scala | 34 ++--
 .../spark/ml/tuning/ParamGridBuilder.scala  | 14 ++--
 .../spark/ml/tuning/TrainValidationSplit.scala  | 26 ---
 3 files changed, 58 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/de07d06a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
index 83a9048..5c09f1a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
@@ -19,18 +19,18 @@ package org.apache.spark.ml.tuning
 
 import com.github.fommil.netlib.F2jBLAS
 import org.apache.hadoop.fs.Path
-import org.json4s.{JObject, DefaultFormats}
 import org.json4s.jackson.JsonMethods._
+import org.json4s.{DefaultFormats, JObject}
 
-import org.apache.spark.ml.classification.OneVsRestParams
-import org.apache.spark.ml.feature.RFormulaModel
-import org.apache.spark.{SparkContext, Logging}
+import org.apache.spark.{Logging, SparkContext}
 import org.apache.spark.annotation.{Experimental, Since}
 import org.apache.spark.ml._
+import org.apache.spark.ml.classification.OneVsRestParams
 import org.apache.spark.ml.evaluation.Evaluator
+import org.apache.spark.ml.feature.RFormulaModel
 import org.apache.spark.ml.param._
-import org.apache.spark.ml.util._
 import org.apache.spark.ml.util.DefaultParamsReader.Metadata
+import org.apache.spark.ml.util._
 import org.apache.spark.mllib.util.MLUtils
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.types.StructType
@@ -58,26 +58,34 @@ private[ml] trait CrossValidatorParams extends 
ValidatorParams {
  * :: Experimental ::
  * K-fold cross validation.
  */
+@Since("1.2.0")
 @Experimental
-class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorModel]
+class CrossValidator @Since("1.2.0") (@Since("1.4.0") override val uid: String)
+  extends Estimator[CrossValidatorModel]
   with CrossValidatorParams with MLWritable with Logging {
 
+  @Since("1.2.0")
   def this() = this(Identifiable.randomUID("cv"))
 
   private val f2jBLAS = new F2jBLAS
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEstimator(value: Estimator[_]): this.type = set(estimator, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEstimatorParamMaps(value: Array[ParamMap]): this.type = 
set(estimatorParamMaps, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEvaluator(value: Evaluator): this.type = set(evaluator, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setNumFolds(value: Int): this.type = set(numFolds, value)
 
+  @Since("1.4.0")
   override def fit(dataset: DataFrame): CrossValidatorModel = {
 val schema = dataset.schema
 transformSchema(schema, logging = true)
@@ -116,10 +124,12 @@ class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorM
 copyValues(new CrossValidatorModel(uid, bestModel, 
metrics).setParent(this))
   }
 
+  @Since("1.4.0")
   override def transformSchema(schema: StructType): StructType = {
 $(estimator).transformSchema(schema)
   }
 
+  @Since("1.4.0")
   override def validateParams(): Unit = {
 super.validateParams()
 val est = $(estimator)
@@ -128,6 +138,7 @@ class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorM
 }
   }
 
+  @Since("1.4.0")
   override def copy(extra: ParamMap): CrossValidator = {
 val copied = defaultCopy(extra).asInstanceOf[CrossValidator]
 if (copied.isDefined(estimator)) {
@@ -308,26 +319,31 @@ object CrossValidator extends MLReadable[CrossValidator] {
  * @param avgMetrics Average 

spark git commit: [SPARK-10266][DOCUMENTATION, ML] Fixed @Since annotation for ml.tunning

2015-12-02 Thread meng
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 5d915fed3 -> 911259e9a


[SPARK-10266][DOCUMENTATION, ML] Fixed @Since annotation for ml.tunning

cc mengxr noel-smith

I worked on this issue based on https://github.com/apache/spark/pull/8729.
ehsanmok, thank you for your contribution!

Author: Yu ISHIKAWA 
Author: Ehsan M.Kermani 

Closes #9338 from yu-iskw/JIRA-10266.

(cherry picked from commit de07d06abecf3516c95d099b6c01a86e0c8cfd8c)
Signed-off-by: Xiangrui Meng 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/911259e9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/911259e9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/911259e9

Branch: refs/heads/branch-1.6
Commit: 911259e9af6f9a81e775b1aa6d82fa44956bf993
Parents: 5d915fe
Author: Yu ISHIKAWA 
Authored: Wed Dec 2 14:15:54 2015 -0800
Committer: Xiangrui Meng 
Committed: Wed Dec 2 14:16:05 2015 -0800

--
 .../apache/spark/ml/tuning/CrossValidator.scala | 34 ++--
 .../spark/ml/tuning/ParamGridBuilder.scala  | 14 ++--
 .../spark/ml/tuning/TrainValidationSplit.scala  | 26 ---
 3 files changed, 58 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/911259e9/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
index 83a9048..5c09f1a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala
@@ -19,18 +19,18 @@ package org.apache.spark.ml.tuning
 
 import com.github.fommil.netlib.F2jBLAS
 import org.apache.hadoop.fs.Path
-import org.json4s.{JObject, DefaultFormats}
 import org.json4s.jackson.JsonMethods._
+import org.json4s.{DefaultFormats, JObject}
 
-import org.apache.spark.ml.classification.OneVsRestParams
-import org.apache.spark.ml.feature.RFormulaModel
-import org.apache.spark.{SparkContext, Logging}
+import org.apache.spark.{Logging, SparkContext}
 import org.apache.spark.annotation.{Experimental, Since}
 import org.apache.spark.ml._
+import org.apache.spark.ml.classification.OneVsRestParams
 import org.apache.spark.ml.evaluation.Evaluator
+import org.apache.spark.ml.feature.RFormulaModel
 import org.apache.spark.ml.param._
-import org.apache.spark.ml.util._
 import org.apache.spark.ml.util.DefaultParamsReader.Metadata
+import org.apache.spark.ml.util._
 import org.apache.spark.mllib.util.MLUtils
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.types.StructType
@@ -58,26 +58,34 @@ private[ml] trait CrossValidatorParams extends 
ValidatorParams {
  * :: Experimental ::
  * K-fold cross validation.
  */
+@Since("1.2.0")
 @Experimental
-class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorModel]
+class CrossValidator @Since("1.2.0") (@Since("1.4.0") override val uid: String)
+  extends Estimator[CrossValidatorModel]
   with CrossValidatorParams with MLWritable with Logging {
 
+  @Since("1.2.0")
   def this() = this(Identifiable.randomUID("cv"))
 
   private val f2jBLAS = new F2jBLAS
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEstimator(value: Estimator[_]): this.type = set(estimator, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEstimatorParamMaps(value: Array[ParamMap]): this.type = 
set(estimatorParamMaps, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setEvaluator(value: Evaluator): this.type = set(evaluator, value)
 
   /** @group setParam */
+  @Since("1.2.0")
   def setNumFolds(value: Int): this.type = set(numFolds, value)
 
+  @Since("1.4.0")
   override def fit(dataset: DataFrame): CrossValidatorModel = {
 val schema = dataset.schema
 transformSchema(schema, logging = true)
@@ -116,10 +124,12 @@ class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorM
 copyValues(new CrossValidatorModel(uid, bestModel, 
metrics).setParent(this))
   }
 
+  @Since("1.4.0")
   override def transformSchema(schema: StructType): StructType = {
 $(estimator).transformSchema(schema)
   }
 
+  @Since("1.4.0")
   override def validateParams(): Unit = {
 super.validateParams()
 val est = $(estimator)
@@ -128,6 +138,7 @@ class CrossValidator(override val uid: String) extends 
Estimator[CrossValidatorM
 }
   }
 
+  @Since("1.4.0")
   override def copy(extra: ParamMap): CrossValidator = {
 val copied = defaultCopy(extra).asInstanceOf[CrossValidator]
 if 

spark git commit: [SPARK-12093][SQL] Fix the error of comment in DDLParser

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 911259e9a -> cb142fd1e


[SPARK-12093][SQL] Fix the error of comment in DDLParser

Author: Yadong Qi 

Closes #10096 from watermen/patch-1.

(cherry picked from commit d0d7ec533062151269b300ed455cf150a69098c0)
Signed-off-by: Reynold Xin 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/cb142fd1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/cb142fd1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/cb142fd1

Branch: refs/heads/branch-1.6
Commit: cb142fd1e6d98b140de3813775c5a58ea624b1d4
Parents: 911259e
Author: Yadong Qi 
Authored: Thu Dec 3 08:48:49 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 08:49:18 2015 +0800

--
 .../org/apache/spark/sql/execution/datasources/DDLParser.scala | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/cb142fd1/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
--
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
index 6969b42..f22508b 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
@@ -66,15 +66,15 @@ class DDLParser(parseQuery: String => LogicalPlan)
   protected def start: Parser[LogicalPlan] = ddl
 
   /**
-   * `CREATE [TEMPORARY] TABLE avroTable [IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* or
-   * `CREATE [TEMPORARY] TABLE avroTable(intField int, stringField string...) 
[IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable(intField int, 
stringField string...)
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* or
-   * `CREATE [TEMPORARY] TABLE avroTable [IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* AS SELECT ...





spark git commit: [SPARK-12093][SQL] Fix the error of comment in DDLParser

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/master de07d06ab -> d0d7ec533


[SPARK-12093][SQL] Fix the error of comment in DDLParser

Author: Yadong Qi 

Closes #10096 from watermen/patch-1.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d0d7ec53
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d0d7ec53
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d0d7ec53

Branch: refs/heads/master
Commit: d0d7ec533062151269b300ed455cf150a69098c0
Parents: de07d06
Author: Yadong Qi 
Authored: Thu Dec 3 08:48:49 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 08:48:49 2015 +0800

--
 .../org/apache/spark/sql/execution/datasources/DDLParser.scala | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/d0d7ec53/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
--
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
index 6969b42..f22508b 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
@@ -66,15 +66,15 @@ class DDLParser(parseQuery: String => LogicalPlan)
   protected def start: Parser[LogicalPlan] = ddl
 
   /**
-   * `CREATE [TEMPORARY] TABLE avroTable [IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* or
-   * `CREATE [TEMPORARY] TABLE avroTable(intField int, stringField string...) 
[IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable(intField int, 
stringField string...)
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* or
-   * `CREATE [TEMPORARY] TABLE avroTable [IF NOT EXISTS]
+   * `CREATE [TEMPORARY] TABLE [IF NOT EXISTS] avroTable
* USING org.apache.spark.sql.avro
* OPTIONS (path "../hive/src/test/resources/data/files/episodes.avro")`
* AS SELECT ...





spark git commit: [SPARK-12000] do not specify arg types when reference a method in ScalaDoc

2015-12-02 Thread meng
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 cb142fd1e -> 656d44e20


[SPARK-12000] do not specify arg types when reference a method in ScalaDoc

This fixes SPARK-12000, verified locally with JDK 7. It seems that
`scaladoc` tries to match method names and gets confused by annotations.

cc: JoshRosen jkbradley

Author: Xiangrui Meng 

Closes #10114 from mengxr/SPARK-12000.2.

(cherry picked from commit 9bb695b7a82d837e2c7a724514ea6b203efb5364)
Signed-off-by: Xiangrui Meng 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/656d44e2
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/656d44e2
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/656d44e2

Branch: refs/heads/branch-1.6
Commit: 656d44e2021d2f637d724c1d71ecdca1f447a4be
Parents: cb142fd
Author: Xiangrui Meng 
Authored: Wed Dec 2 17:19:31 2015 -0800
Committer: Xiangrui Meng 
Committed: Wed Dec 2 17:19:45 2015 -0800

--
 .../org/apache/spark/mllib/clustering/BisectingKMeans.scala  | 2 +-
 .../org/apache/spark/mllib/clustering/BisectingKMeansModel.scala | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/656d44e2/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
index 29a7aa0..82adfa6 100644
--- 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
@@ -214,7 +214,7 @@ class BisectingKMeans private (
   }
 
   /**
-   * Java-friendly version of [[run(RDD[Vector])*]]
+   * Java-friendly version of [[run()]].
*/
   def run(data: JavaRDD[Vector]): BisectingKMeansModel = run(data.rdd)
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/656d44e2/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
index 5015f15..f942e56 100644
--- 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
@@ -64,7 +64,7 @@ class BisectingKMeansModel @Since("1.6.0") (
   }
 
   /**
-   * Java-friendly version of [[predict(RDD[Vector])*]]
+   * Java-friendly version of [[predict()]].
*/
   @Since("1.6.0")
   def predict(points: JavaRDD[Vector]): JavaRDD[java.lang.Integer] =
@@ -88,7 +88,7 @@ class BisectingKMeansModel @Since("1.6.0") (
   }
 
   /**
-   * Java-friendly version of [[computeCost(RDD[Vector])*]].
+   * Java-friendly version of [[computeCost()]].
*/
   @Since("1.6.0")
   def computeCost(data: JavaRDD[Vector]): Double = this.computeCost(data.rdd)





spark git commit: [SPARK-12000] do not specify arg types when reference a method in ScalaDoc

2015-12-02 Thread meng
Repository: spark
Updated Branches:
  refs/heads/master d0d7ec533 -> 9bb695b7a


[SPARK-12000] do not specify arg types when reference a method in ScalaDoc

This fixes SPARK-12000, verified locally with JDK 7. It seems that
`scaladoc` tries to match method names and gets confused by annotations.

cc: JoshRosen jkbradley

Author: Xiangrui Meng 

Closes #10114 from mengxr/SPARK-12000.2.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9bb695b7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9bb695b7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9bb695b7

Branch: refs/heads/master
Commit: 9bb695b7a82d837e2c7a724514ea6b203efb5364
Parents: d0d7ec5
Author: Xiangrui Meng 
Authored: Wed Dec 2 17:19:31 2015 -0800
Committer: Xiangrui Meng 
Committed: Wed Dec 2 17:19:31 2015 -0800

--
 .../org/apache/spark/mllib/clustering/BisectingKMeans.scala  | 2 +-
 .../org/apache/spark/mllib/clustering/BisectingKMeansModel.scala | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/9bb695b7/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
index 29a7aa0..82adfa6 100644
--- 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala
@@ -214,7 +214,7 @@ class BisectingKMeans private (
   }
 
   /**
-   * Java-friendly version of [[run(RDD[Vector])*]]
+   * Java-friendly version of [[run()]].
*/
   def run(data: JavaRDD[Vector]): BisectingKMeansModel = run(data.rdd)
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/9bb695b7/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
--
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
index 5015f15..f942e56 100644
--- 
a/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala
@@ -64,7 +64,7 @@ class BisectingKMeansModel @Since("1.6.0") (
   }
 
   /**
-   * Java-friendly version of [[predict(RDD[Vector])*]]
+   * Java-friendly version of [[predict()]].
*/
   @Since("1.6.0")
   def predict(points: JavaRDD[Vector]): JavaRDD[java.lang.Integer] =
@@ -88,7 +88,7 @@ class BisectingKMeansModel @Since("1.6.0") (
   }
 
   /**
-   * Java-friendly version of [[computeCost(RDD[Vector])*]].
+   * Java-friendly version of [[computeCost()]].
*/
   @Since("1.6.0")
   def computeCost(data: JavaRDD[Vector]): Double = this.computeCost(data.rdd)





spark git commit: [SPARK-12109][SQL] Expressions's simpleString should delegate to its toString.

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/master ae4025337 -> ec2b6c26c


[SPARK-12109][SQL] Expressions's simpleString should delegate to its toString.

https://issues.apache.org/jira/browse/SPARK-12109

The change of https://issues.apache.org/jira/browse/SPARK-11596 exposed the 
problem.
In the sql plan viz, the filter shows

![image](https://cloud.githubusercontent.com/assets/2072857/11547075/1a285230-9906-11e5-8481-2bb451e35ef1.png)

After changes in this PR, the viz is back to normal.
![image](https://cloud.githubusercontent.com/assets/2072857/11547080/2bc570f4-9906-11e5-8897-3b3bff173276.png)
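
A minimal sketch of the delegation introduced here (hypothetical names, not the Spark classes): `simpleString`, which the plan visualization uses, now falls back to the expression's full `toString` instead of an abbreviated form, so expressions embedded in operators render completely.

```scala
abstract class SketchExpression {
  def prettyName: String = getClass.getSimpleName.toLowerCase
  def arguments: Seq[Any]
  override def toString: String = prettyName + arguments.mkString("(", ",", ")")
  // Before this change the inherited TreeNode default was used here.
  def simpleString: String = toString
}

case class GreaterThan(left: Any, right: Any) extends SketchExpression {
  def arguments: Seq[Any] = Seq(left, right)
}

object SimpleStringSketch {
  def main(args: Array[String]): Unit = {
    // The plan label now shows the full expression, e.g. "greaterthan(age,21)".
    println(GreaterThan("age", 21).simpleString)
  }
}
```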

Author: Yin Huai 

Closes #10111 from yhuai/SPARK-12109.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/ec2b6c26
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/ec2b6c26
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/ec2b6c26

Branch: refs/heads/master
Commit: ec2b6c26c9b6bd59d29b5d7af2742aca7e6e0b07
Parents: ae40253
Author: Yin Huai 
Authored: Thu Dec 3 11:21:24 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 11:21:24 2015 +0800

--
 .../org/apache/spark/sql/catalyst/expressions/Expression.scala| 3 ++-
 .../apache/spark/sql/catalyst/expressions/windowExpressions.scala | 3 ---
 .../main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/ec2b6c26/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index 4ee6542..614f0c0 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -207,12 +207,13 @@ abstract class Expression extends TreeNode[Expression] {
 }.toString
   }
 
-
   private def flatArguments = productIterator.flatMap {
 case t: Traversable[_] => t
 case single => single :: Nil
   }
 
+  override def simpleString: String = toString
+
   override def toString: String = prettyName + flatArguments.mkString("(", 
",", ")")
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/ec2b6c26/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
index 09ec0e3..1680aa8 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
@@ -71,9 +71,6 @@ case class WindowSpecDefinition(
 childrenResolved && checkInputDataTypes().isSuccess &&
   frameSpecification.isInstanceOf[SpecifiedWindowFrame]
 
-
-  override def toString: String = simpleString
-
   override def nullable: Boolean = true
   override def foldable: Boolean = false
   override def dataType: DataType = throw new UnsupportedOperationException

http://git-wip-us.apache.org/repos/asf/spark/blob/ec2b6c26/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index dfea583..d838d84 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -380,7 +380,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] 
extends Product {
   /** Returns a string representing the arguments to this node, minus any 
children */
   def argString: String = productIterator.flatMap {
 case tn: TreeNode[_] if containsChild(tn) => Nil
-case tn: TreeNode[_] => s"(${tn.simpleString})" :: Nil
+case tn: TreeNode[_] => s"${tn.simpleString}" :: Nil
 case seq: Seq[BaseType] if seq.toSet.subsetOf(children.toSet) => Nil
 case seq: Seq[_] => seq.mkString("[", ",", "]") :: Nil
 case set: Set[_] => set.mkString("{", ",", "}") :: Nil



spark git commit: [SPARK-12109][SQL] Expressions's simpleString should delegate to its toString.

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 6914ee9f0 -> 6674fd8aa


[SPARK-12109][SQL] Expressions's simpleString should delegate to its toString.

https://issues.apache.org/jira/browse/SPARK-12109

The change of https://issues.apache.org/jira/browse/SPARK-11596 exposed the 
problem.
In the sql plan viz, the filter shows

![image](https://cloud.githubusercontent.com/assets/2072857/11547075/1a285230-9906-11e5-8481-2bb451e35ef1.png)

After changes in this PR, the viz is back to normal.
![image](https://cloud.githubusercontent.com/assets/2072857/11547080/2bc570f4-9906-11e5-8897-3b3bff173276.png)

Author: Yin Huai 

Closes #10111 from yhuai/SPARK-12109.

(cherry picked from commit ec2b6c26c9b6bd59d29b5d7af2742aca7e6e0b07)
Signed-off-by: Reynold Xin 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6674fd8a
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6674fd8a
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6674fd8a

Branch: refs/heads/branch-1.6
Commit: 6674fd8aa9b04966bd7d19650754805cd241e399
Parents: 6914ee9
Author: Yin Huai 
Authored: Thu Dec 3 11:21:24 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 11:21:46 2015 +0800

--
 .../org/apache/spark/sql/catalyst/expressions/Expression.scala| 3 ++-
 .../apache/spark/sql/catalyst/expressions/windowExpressions.scala | 3 ---
 .../main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/6674fd8a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index 4ee6542..614f0c0 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -207,12 +207,13 @@ abstract class Expression extends TreeNode[Expression] {
 }.toString
   }
 
-
   private def flatArguments = productIterator.flatMap {
 case t: Traversable[_] => t
 case single => single :: Nil
   }
 
+  override def simpleString: String = toString
+
   override def toString: String = prettyName + flatArguments.mkString("(", 
",", ")")
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/6674fd8a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
index 09ec0e3..1680aa8 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
@@ -71,9 +71,6 @@ case class WindowSpecDefinition(
 childrenResolved && checkInputDataTypes().isSuccess &&
   frameSpecification.isInstanceOf[SpecifiedWindowFrame]
 
-
-  override def toString: String = simpleString
-
   override def nullable: Boolean = true
   override def foldable: Boolean = false
   override def dataType: DataType = throw new UnsupportedOperationException

http://git-wip-us.apache.org/repos/asf/spark/blob/6674fd8a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
--
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index dfea583..d838d84 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -380,7 +380,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] 
extends Product {
   /** Returns a string representing the arguments to this node, minus any 
children */
   def argString: String = productIterator.flatMap {
 case tn: TreeNode[_] if containsChild(tn) => Nil
-case tn: TreeNode[_] => s"(${tn.simpleString})" :: Nil
+case tn: TreeNode[_] => s"${tn.simpleString}" :: Nil
 case seq: Seq[BaseType] if seq.toSet.subsetOf(children.toSet) => Nil
 case seq: Seq[_] => seq.mkString("[", ",", "]") :: Nil
 case set: Set[_] => set.mkString("{", ",", "}") 

spark git commit: [SPARK-12082][FLAKY-TEST] Increase timeouts in NettyBlockTransferSecuritySuite

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 656d44e20 -> 6914ee9f0


[SPARK-12082][FLAKY-TEST] Increase timeouts in NettyBlockTransferSecuritySuite

We should try increasing a timeout in NettyBlockTransferSecuritySuite in order 
to reduce that suite's flakiness in Jenkins.

Author: Josh Rosen 

Closes #10113 from JoshRosen/SPARK-12082.

(cherry picked from commit ae402533738be06ac802914ed3e48f0d5fa54cbe)
Signed-off-by: Reynold Xin 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6914ee9f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6914ee9f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6914ee9f

Branch: refs/heads/branch-1.6
Commit: 6914ee9f0a063b880a0329365f465dcbe96e1adb
Parents: 656d44e
Author: Josh Rosen 
Authored: Thu Dec 3 11:12:02 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 11:12:23 2015 +0800

--
 .../spark/network/netty/NettyBlockTransferSecuritySuite.scala  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/6914ee9f/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
--
diff --git 
a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
 
b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
index 3940527..98da941 100644
--- 
a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
@@ -148,7 +148,7 @@ class NettyBlockTransferSecuritySuite extends SparkFunSuite 
with MockitoSugar wi
 }
   })
 
-Await.ready(promise.future, FiniteDuration(1000, TimeUnit.MILLISECONDS))
+Await.ready(promise.future, FiniteDuration(10, TimeUnit.SECONDS))
 promise.future.value.get
   }
 }





spark git commit: [SPARK-12082][FLAKY-TEST] Increase timeouts in NettyBlockTransferSecuritySuite

2015-12-02 Thread rxin
Repository: spark
Updated Branches:
  refs/heads/master 9bb695b7a -> ae4025337


[SPARK-12082][FLAKY-TEST] Increase timeouts in NettyBlockTransferSecuritySuite

We should try increasing a timeout in NettyBlockTransferSecuritySuite in order 
to reduce that suite's flakiness in Jenkins.

Author: Josh Rosen 

Closes #10113 from JoshRosen/SPARK-12082.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/ae402533
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/ae402533
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/ae402533

Branch: refs/heads/master
Commit: ae402533738be06ac802914ed3e48f0d5fa54cbe
Parents: 9bb695b
Author: Josh Rosen 
Authored: Thu Dec 3 11:12:02 2015 +0800
Committer: Reynold Xin 
Committed: Thu Dec 3 11:12:02 2015 +0800

--
 .../spark/network/netty/NettyBlockTransferSecuritySuite.scala  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/ae402533/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
--
diff --git 
a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
 
b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
index 3940527..98da941 100644
--- 
a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferSecuritySuite.scala
@@ -148,7 +148,7 @@ class NettyBlockTransferSecuritySuite extends SparkFunSuite 
with MockitoSugar wi
 }
   })
 
-Await.ready(promise.future, FiniteDuration(1000, TimeUnit.MILLISECONDS))
+Await.ready(promise.future, FiniteDuration(10, TimeUnit.SECONDS))
 promise.future.value.get
   }
 }





spark git commit: [SPARK-3580][CORE] Add Consistent Method To Get Number of RDD Partitions Across Different Languages

2015-12-02 Thread srowen
Repository: spark
Updated Branches:
  refs/heads/master 4375eb3f4 -> 128c29035


[SPARK-3580][CORE] Add Consistent Method To Get Number of RDD Partitions Across 
Different Languages

I have tried to address all the comments in pull request 
https://github.com/apache/spark/pull/2447.

Note that the second commit (using the new method in all internal code of all 
components) is quite intrusive and could be omitted.
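
A hedged usage sketch of the new method (the `SparkContext` setup is illustrative; `getNumPartitions` is the API added by the diff below):

```scala
import org.apache.spark.{SparkConf, SparkContext}

object NumPartitionsExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("example").setMaster("local[2]"))
    val rdd = sc.parallelize(1 to 8, numSlices = 3)
    // Same value as rdd.partitions.length, now available uniformly in Scala and Java.
    println(rdd.getNumPartitions)  // 3
    sc.stop()
  }
}
```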

Author: Jeroen Schot 

Closes #9767 from schot/master.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/128c2903
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/128c2903
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/128c2903

Branch: refs/heads/master
Commit: 128c29035b4e7383cc3a9a6c7a9ab6136205ac6c
Parents: 4375eb3
Author: Jeroen Schot 
Authored: Wed Dec 2 09:40:07 2015 +
Committer: Sean Owen 
Committed: Wed Dec 2 09:40:07 2015 +

--
 .../scala/org/apache/spark/api/java/JavaRDDLike.scala  |  5 +
 core/src/main/scala/org/apache/spark/rdd/RDD.scala |  8 +++-
 core/src/test/java/org/apache/spark/JavaAPISuite.java  | 13 +
 .../src/test/scala/org/apache/spark/rdd/RDDSuite.scala |  1 +
 project/MimaExcludes.scala |  4 
 5 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/128c2903/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
--
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala 
b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index 1e9d4f1..0e4d7dc 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -28,6 +28,7 @@ import com.google.common.base.Optional
 import org.apache.hadoop.io.compress.CompressionCodec
 
 import org.apache.spark._
+import org.apache.spark.annotation.Since
 import org.apache.spark.api.java.JavaPairRDD._
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
 import org.apache.spark.api.java.JavaUtils.mapAsSerializableJavaMap
@@ -62,6 +63,10 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends 
Serializable {
   /** Set of partitions in this RDD. */
   def partitions: JList[Partition] = rdd.partitions.toSeq.asJava
 
+  /** Return the number of partitions in this RDD. */
+  @Since("1.6.0")
+  def getNumPartitions: Int = rdd.getNumPartitions
+
   /** The partitioner of this RDD. */
   def partitioner: Optional[Partitioner] = 
JavaUtils.optionToOptional(rdd.partitioner)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/128c2903/core/src/main/scala/org/apache/spark/rdd/RDD.scala
--
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala 
b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 8b3731d..9fe9d83 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -31,7 +31,7 @@ import org.apache.hadoop.mapred.TextOutputFormat
 
 import org.apache.spark._
 import org.apache.spark.Partitioner._
-import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.annotation.{Since, DeveloperApi}
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.partial.BoundedDouble
 import org.apache.spark.partial.CountEvaluator
@@ -243,6 +243,12 @@ abstract class RDD[T: ClassTag](
   }
 
   /**
+   * Returns the number of partitions of this RDD.
+   */
+  @Since("1.6.0")
+  final def getNumPartitions: Int = partitions.length
+
+  /**
    * Get the preferred locations of a partition, taking into account whether the
    * RDD is checkpointed.
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/128c2903/core/src/test/java/org/apache/spark/JavaAPISuite.java
--
diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java 
b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index 4d4e982..11f1248 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -973,6 +973,19 @@ public class JavaAPISuite implements Serializable {
 Assert.assertEquals("[3, 7]", partitionSums.collect().toString());
   }
 
+  @Test
+  public void getNumPartitions(){
+    JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 3);
+    JavaDoubleRDD rdd2 = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0), 2);
+    JavaPairRDD<String, Integer> rdd3 = sc.parallelizePairs(Arrays.asList(
+        new Tuple2<>("a", 1),
+new 

spark git commit: [SPARK-3580][CORE] Add Consistent Method To Get Number of RDD Partitions Across Different Languages

2015-12-02 Thread srowen
Repository: spark
Updated Branches:
  refs/heads/branch-1.6 c47a7373a -> d79dd971d


[SPARK-3580][CORE] Add Consistent Method To Get Number of RDD Partitions Across 
Different Languages

I have tried to address all the comments in pull request 
https://github.com/apache/spark/pull/2447.

Note that the second commit (using the new method in all internal code of all 
components) is quite intrusive and could be omitted.

Author: Jeroen Schot 

Closes #9767 from schot/master.

(cherry picked from commit 128c29035b4e7383cc3a9a6c7a9ab6136205ac6c)
Signed-off-by: Sean Owen 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d79dd971
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d79dd971
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d79dd971

Branch: refs/heads/branch-1.6
Commit: d79dd971d01b69f8065b802fb5a78023ca905c7c
Parents: c47a737
Author: Jeroen Schot 
Authored: Wed Dec 2 09:40:07 2015 +
Committer: Sean Owen 
Committed: Wed Dec 2 09:41:53 2015 +

--
 .../scala/org/apache/spark/api/java/JavaRDDLike.scala  |  5 +
 core/src/main/scala/org/apache/spark/rdd/RDD.scala |  8 +++-
 core/src/test/java/org/apache/spark/JavaAPISuite.java  | 13 +
 .../src/test/scala/org/apache/spark/rdd/RDDSuite.scala |  1 +
 project/MimaExcludes.scala |  4 
 5 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/d79dd971/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
--
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala 
b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index 1e9d4f1..0e4d7dc 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -28,6 +28,7 @@ import com.google.common.base.Optional
 import org.apache.hadoop.io.compress.CompressionCodec
 
 import org.apache.spark._
+import org.apache.spark.annotation.Since
 import org.apache.spark.api.java.JavaPairRDD._
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
 import org.apache.spark.api.java.JavaUtils.mapAsSerializableJavaMap
@@ -62,6 +63,10 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends 
Serializable {
   /** Set of partitions in this RDD. */
   def partitions: JList[Partition] = rdd.partitions.toSeq.asJava
 
+  /** Return the number of partitions in this RDD. */
+  @Since("1.6.0")
+  def getNumPartitions: Int = rdd.getNumPartitions
+
   /** The partitioner of this RDD. */
   def partitioner: Optional[Partitioner] = 
JavaUtils.optionToOptional(rdd.partitioner)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d79dd971/core/src/main/scala/org/apache/spark/rdd/RDD.scala
--
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala 
b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 8b3731d..9fe9d83 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -31,7 +31,7 @@ import org.apache.hadoop.mapred.TextOutputFormat
 
 import org.apache.spark._
 import org.apache.spark.Partitioner._
-import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.annotation.{Since, DeveloperApi}
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.partial.BoundedDouble
 import org.apache.spark.partial.CountEvaluator
@@ -243,6 +243,12 @@ abstract class RDD[T: ClassTag](
   }
 
   /**
+   * Returns the number of partitions of this RDD.
+   */
+  @Since("1.6.0")
+  final def getNumPartitions: Int = partitions.length
+
+  /**
    * Get the preferred locations of a partition, taking into account whether the
    * RDD is checkpointed.
    */

http://git-wip-us.apache.org/repos/asf/spark/blob/d79dd971/core/src/test/java/org/apache/spark/JavaAPISuite.java
--
diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java 
b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index 4d4e982..11f1248 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -973,6 +973,19 @@ public class JavaAPISuite implements Serializable {
 Assert.assertEquals("[3, 7]", partitionSums.collect().toString());
   }
 
+  @Test
+  public void getNumPartitions(){
+    JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 3);
+    JavaDoubleRDD rdd2 = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0), 2);
+