Repository: spark
Updated Branches:
  refs/heads/master 0e6368ffa -> 5c1489015


[DOC] Add missing parameters in SparkContext.scala for Scaladoc

Author: Zhang, Liye <liye.zh...@intel.com>

Closes #8412 from liyezhang556520/minorDoc.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5c148901
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5c148901
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5c148901

Branch: refs/heads/master
Commit: 5c14890159a5711072bf395f662b2433a389edf9
Parents: 0e6368f
Author: Zhang, Liye <liye.zh...@intel.com>
Authored: Tue Aug 25 11:48:55 2015 +0100
Committer: Sean Owen <so...@cloudera.com>
Committed: Tue Aug 25 11:48:55 2015 +0100

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/SparkContext.scala   | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/5c148901/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 1ddaca8..9849aff 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -114,6 +114,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    * :: DeveloperApi ::
    * Alternative constructor for setting preferred locations where Spark will create executors.
    *
+   * @param config a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
    * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
    * Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
    * from a list of input files or InputFormats for the application.
@@ -145,6 +146,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
    *             system or HDFS, HTTP, HTTPS, or FTP URLs.
    * @param environment Environment variables to set on worker nodes.
+   * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
+   * Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
+   * from a list of input files or InputFormats for the application.
    */
   def this(
       master: String,
@@ -841,6 +845,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    * @note Small files are preferred, large file is also allowable, but may cause bad performance.
    * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
    *       in a directory rather than `.../path/` or `.../path`
+   *
+   * @param path Directory to the input data files, the path can be comma separated paths as the
+   *             list of inputs.
    * @param minPartitions A suggestion value of the minimal splitting number for input data.
    */
   def wholeTextFiles(
@@ -889,6 +896,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    * @note Small files are preferred; very large files may cause bad performance.
    * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
    *       in a directory rather than `.../path/` or `.../path`
+   *
+   * @param path Directory to the input data files, the path can be comma separated paths as the
+   *             list of inputs.
    * @param minPartitions A suggestion value of the minimal splitting number for input data.
    */
   @Experimental
@@ -918,8 +928,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
    * '''Note:''' We ensure that the byte array for each record in the resulting RDD
    * has the provided record length.
    *
-   * @param path Directory to the input data files
+   * @param path Directory to the input data files, the path can be comma separated paths as the
+   *             list of inputs.
    * @param recordLength The length at which to split the records
+   * @param conf Configuration for setting up the dataset.
+   *
    * @return An RDD of data with values, represented as byte arrays
    */
   @Experimental


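The amended Scaladoc documents behaviour that is easy to miss from the signatures alone: the `path` parameter of wholeTextFiles and binaryRecords may be a comma separated list of inputs, and binaryRecords takes an optional Hadoop Configuration "for setting up the dataset". A minimal usage sketch follows; the local master, application name, and input paths are illustrative assumptions, not part of this commit.

import org.apache.spark.{SparkConf, SparkContext}

object ParamDocSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical local setup; any master and application name would do.
    val conf = new SparkConf().setAppName("param-doc-sketch").setMaster("local[2]")
    val sc   = new SparkContext(conf)

    // `path` may be a single directory or a comma separated list of inputs,
    // as the added @param path text describes; minPartitions is only a
    // suggestion for the minimal number of splits.
    val texts = sc.wholeTextFiles("/data/logs/2015-08,/data/logs/2015-09", minPartitions = 4)
    println(s"whole text files read: ${texts.count()}")

    // binaryRecords splits each input file into fixed-length records; the
    // optional `conf` argument defaults to the SparkContext's Hadoop
    // configuration, the "Configuration for setting up the dataset".
    val records = sc.binaryRecords("/data/fixed-width-records", recordLength = 128)
    println(s"fixed-length records read: ${records.count()}")

    sc.stop()
  }
}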