Repository: spark
Updated Branches:
  refs/heads/branch-2.0 025b3e9f1 -> 1ad3bbd0a


[MINOR][DOCS] Replace remaining 'sqlContext' in ScalaDoc/JavaDoc.

## What changes were proposed in this pull request?

Following the recent change, this PR replaces all remaining `sqlContext`
usage with `spark` in ScalaDoc/JavaDoc (`.scala`/`.java` files), except in
`SQLContext.scala`, `SparkPlan.scala`, and `DatasetHolder.scala`.
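
For reference, the `spark` variable in the updated examples denotes a `SparkSession`, the Spark 2.0 entry point that subsumes `SQLContext`. A minimal sketch of how such a session is obtained (the application name is illustrative, not part of this patch):

```scala
import org.apache.spark.sql.SparkSession

// Spark 2.0 entry point; `spark` in the updated docs refers to an instance like this.
val spark = SparkSession.builder()
  .appName("DocExample")  // illustrative
  .getOrCreate()

// For example, the DataFrameReader snippet touched by this patch:
val lines = spark.read.text("/path/to/spark/README.md")
```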

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongj...@apache.org>

Closes #13125 from dongjoon-hyun/minor_doc_sparksession.

(cherry picked from commit 9f176dd3918129a72282a6b7a12e2899cbb6dac9)
Signed-off-by: Nick Pentreath <ni...@za.ibm.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/1ad3bbd0
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/1ad3bbd0
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/1ad3bbd0

Branch: refs/heads/branch-2.0
Commit: 1ad3bbd0a4c7d4ab4aad0806f345d3904d7cd441
Parents: 025b3e9
Author: Dongjoon Hyun <dongj...@apache.org>
Authored: Tue May 17 20:50:22 2016 +0200
Committer: Nick Pentreath <ni...@za.ibm.com>
Committed: Tue May 17 20:50:47 2016 +0200

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/ml/feature/package.scala  |  2 +-
 .../main/scala/org/apache/spark/sql/DataFrameReader.scala |  4 ++--
 .../org/apache/spark/sql/DataFrameStatFunctions.scala     | 10 +++++-----
 .../scala/org/apache/spark/sql/ExperimentalMethods.scala  |  2 +-
 .../datasources/PartitioningAwareFileCatalog.scala        |  8 ++++----
 .../src/main/scala/org/apache/spark/sql/functions.scala   |  4 ++--
 6 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
index 4571ab2..b94187a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
@@ -44,7 +44,7 @@ import org.apache.spark.sql.DataFrame
  *   import org.apache.spark.ml.Pipeline
  *
  *   // a DataFrame with three columns: id (integer), text (string), and rating (double).
- *   val df = sqlContext.createDataFrame(Seq(
+ *   val df = spark.createDataFrame(Seq(
  *     (0, "Hi I heard about Spark", 3.0),
  *     (1, "I wish Java could use case classes", 4.0),
  *     (2, "Logistic regression models are neat", 4.0)

http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index e1a64df..011aff4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -446,10 +446,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * Each line in the text file is a new row in the resulting Dataset. For example:
    * {{{
    *   // Scala:
-   *   sqlContext.read.text("/path/to/spark/README.md")
+   *   spark.read.text("/path/to/spark/README.md")
    *
    *   // Java:
-   *   sqlContext.read().text("/path/to/spark/README.md")
+   *   spark.read().text("/path/to/spark/README.md")
    * }}}
    *
    * @param paths input path

http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
index 3eb1f0f..1855eab 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
@@ -160,8 +160,8 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
    * @return A DataFrame containing for the contingency table.
    *
    * {{{
-   *    val df = sqlContext.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
-   *      (3, 3))).toDF("key", "value")
+   *    val df = spark.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2), (3, 3)))
+   *      .toDF("key", "value")
    *    val ct = df.stat.crosstab("key", "value")
    *    ct.show()
    *    +---------+---+---+---+
@@ -197,7 +197,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
    *    val rows = Seq.tabulate(100) { i =>
    *      if (i % 2 == 0) (1, -1.0) else (i, i * -1.0)
    *    }
-   *    val df = sqlContext.createDataFrame(rows).toDF("a", "b")
+   *    val df = spark.createDataFrame(rows).toDF("a", "b")
    *    // find the items with a frequency greater than 0.4 (observed 40% of the time) for columns
    *    // "a" and "b"
    *    val freqSingles = df.stat.freqItems(Array("a", "b"), 0.4)
@@ -258,7 +258,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
    *    val rows = Seq.tabulate(100) { i =>
    *      if (i % 2 == 0) (1, -1.0) else (i, i * -1.0)
    *    }
-   *    val df = sqlContext.createDataFrame(rows).toDF("a", "b")
+   *    val df = spark.createDataFrame(rows).toDF("a", "b")
    *    // find the items with a frequency greater than 0.4 (observed 40% of the time) for columns
    *    // "a" and "b"
    *    val freqSingles = df.stat.freqItems(Seq("a", "b"), 0.4)
@@ -314,7 +314,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
    * @return a new [[DataFrame]] that represents the stratified sample
    *
    * {{{
-   *    val df = sqlContext.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
+   *    val df = spark.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
    *      (3, 3))).toDF("key", "value")
    *    val fractions = Map(1 -> 1.0, 3 -> 0.5)
    *    df.stat.sampleBy("key", fractions, 36L).show()

http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
index a49da6d..a435734 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
  * regarding binary compatibility and source compatibility of methods here.
  *
  * {{{
- *   sqlContext.experimental.extraStrategies += ...
+ *   spark.experimental.extraStrategies += ...
  * }}}
  *
  * @since 1.3.0

http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
index e0e4ddc..406d2e8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
@@ -168,17 +168,17 @@ abstract class PartitioningAwareFileCatalog(
    *
    * By default, the paths of the dataset provided by users will be base paths.
    * Below are three typical examples,
-   * Case 1) `sqlContext.read.parquet("/path/something=true/")`: the base path will be
+   * Case 1) `spark.read.parquet("/path/something=true/")`: the base path will be
    * `/path/something=true/`, and the returned DataFrame will not contain a column of `something`.
-   * Case 2) `sqlContext.read.parquet("/path/something=true/a.parquet")`: the base path will be
+   * Case 2) `spark.read.parquet("/path/something=true/a.parquet")`: the base path will be
    * still `/path/something=true/`, and the returned DataFrame will also not contain a column of
    * `something`.
-   * Case 3) `sqlContext.read.parquet("/path/")`: the base path will be `/path/`, and the returned
+   * Case 3) `spark.read.parquet("/path/")`: the base path will be `/path/`, and the returned
    * DataFrame will have the column of `something`.
    *
    * Users also can override the basePath by setting `basePath` in the options to pass the new base
    * path to the data source.
-   * For example, `sqlContext.read.option("basePath", "/path/").parquet("/path/something=true/")`,
+   * For example, `spark.read.option("basePath", "/path/").parquet("/path/something=true/")`,
    * and the returned DataFrame will have the column of `something`.
    */
   private def basePaths: Set[Path] = {
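
A hedged sketch of the `basePath` behavior described in the doc above; the paths are illustrative, and only existing `DataFrameReader` calls (`option`, `parquet`) are used:

    // Base path defaults to "/path/something=true/", so `something` is not a column.
    val withoutPartitionCol = spark.read.parquet("/path/something=true/")

    // Overriding basePath keeps the partition directory as a column named `something`.
    val withPartitionCol = spark.read
      .option("basePath", "/path/")
      .parquet("/path/something=true/")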

http://git-wip-us.apache.org/repos/asf/spark/blob/1ad3bbd0/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 07f5504..65bc043 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -2952,8 +2952,8 @@ object functions {
    *  import org.apache.spark.sql._
    *
    *  val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
-   *  val sqlContext = df.sqlContext
-   *  sqlContext.udf.register("simpleUDF", (v: Int) => v * v)
+   *  val spark = df.sparkSession
+   *  spark.udf.register("simpleUDF", (v: Int) => v * v)
    *  df.select($"id", callUDF("simpleUDF", $"value"))
    * }}}
    *
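
For context, a self-contained sketch of the session-based pattern the callUDF example above now documents; the object name and master URL are illustrative, not part of this patch:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.callUDF

    object SimpleUdfExample {
      def main(args: Array[String]): Unit = {
        // `spark` is the conventional name the updated docs use for the SparkSession.
        val spark = SparkSession.builder()
          .appName("SimpleUdfExample")  // illustrative
          .master("local[*]")           // illustrative
          .getOrCreate()
        import spark.implicits._

        val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
        // Register the UDF on the session (the docs previously showed sqlContext.udf).
        spark.udf.register("simpleUDF", (v: Int) => v * v)
        df.select($"id", callUDF("simpleUDF", $"value")).show()

        spark.stop()
      }
    }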

