Repository: spark
Updated Branches:
  refs/heads/branch-2.0 f90b2ea1d -> 54aef1c14


http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/mllib.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/mllib.R b/R/pkg/R/mllib.R
index 2127dae..d6ff2aa 100644
--- a/R/pkg/R/mllib.R
+++ b/R/pkg/R/mllib.R
@@ -29,24 +29,28 @@
 #'
 #' @param jobj a Java object reference to the backing Scala GeneralizedLinearRegressionWrapper
 #' @export
+#' @note GeneralizedLinearRegressionModel since 2.0.0
 setClass("GeneralizedLinearRegressionModel", representation(jobj = "jobj"))
 
 #' S4 class that represents a NaiveBayesModel
 #'
 #' @param jobj a Java object reference to the backing Scala NaiveBayesWrapper
 #' @export
+#' @note NaiveBayesModel since 2.0.0
 setClass("NaiveBayesModel", representation(jobj = "jobj"))
 
 #' S4 class that represents an AFTSurvivalRegressionModel
 #'
 #' @param jobj a Java object reference to the backing Scala AFTSurvivalRegressionWrapper
 #' @export
+#' @note AFTSurvivalRegressionModel since 2.0.0
 setClass("AFTSurvivalRegressionModel", representation(jobj = "jobj"))
 
 #' S4 class that represents a KMeansModel
 #'
 #' @param jobj a Java object reference to the backing Scala KMeansModel
 #' @export
+#' @note KMeansModel since 2.0.0
 setClass("KMeansModel", representation(jobj = "jobj"))
 
 #' Fits a generalized linear model
@@ -73,6 +77,7 @@ setClass("KMeansModel", representation(jobj = "jobj"))
 #' model <- spark.glm(df, Sepal_Length ~ Sepal_Width, family="gaussian")
 #' summary(model)
 #' }
+#' @note spark.glm since 2.0.0
 setMethod(
     "spark.glm",
     signature(data = "SparkDataFrame", formula = "formula"),
@@ -120,6 +125,7 @@ setMethod(
 #' model <- glm(Sepal_Length ~ Sepal_Width, df, family="gaussian")
 #' summary(model)
 #' }
+#' @note glm since 1.5.0
 setMethod("glm", signature(formula = "formula", family = "ANY", data = 
"SparkDataFrame"),
           function(formula, family = gaussian, data, epsilon = 1e-06, maxit = 
25) {
             spark.glm(data, formula, family, epsilon, maxit)
@@ -138,6 +144,7 @@ setMethod("glm", signature(formula = "formula", family = "ANY", data = "SparkDat
 #' model <- glm(y ~ x, trainingData)
 #' summary(model)
 #' }
+#' @note summary(GeneralizedLinearRegressionModel) since 2.0.0
 setMethod("summary", signature(object = "GeneralizedLinearRegressionModel"),
           function(object, ...) {
             jobj <- object@jobj
@@ -173,6 +180,7 @@ setMethod("summary", signature(object = "GeneralizedLinearRegressionModel"),
 #' @rdname print
 #' @name print.summary.GeneralizedLinearRegressionModel
 #' @export
+#' @note print.summary.GeneralizedLinearRegressionModel since 2.0.0
 print.summary.GeneralizedLinearRegressionModel <- function(x, ...) {
   if (x$is.loaded) {
     cat("\nSaved-loaded model does not support output 'Deviance Residuals'.\n")
@@ -215,6 +223,7 @@ print.summary.GeneralizedLinearRegressionModel <- function(x, ...) {
 #' predicted <- predict(model, testData)
 #' showDF(predicted)
 #' }
+#' @note predict(GeneralizedLinearRegressionModel) since 1.5.0
 setMethod("predict", signature(object = "GeneralizedLinearRegressionModel"),
           function(object, newData) {
             return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf)))
@@ -236,6 +245,7 @@ setMethod("predict", signature(object = "GeneralizedLinearRegressionModel"),
 #' predicted <- predict(model, testData)
 #' showDF(predicted)
 #'}
+#' @note predict(NaiveBayesModel) since 2.0.0
 setMethod("predict", signature(object = "NaiveBayesModel"),
           function(object, newData) {
             return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf)))
@@ -256,6 +266,7 @@ setMethod("predict", signature(object = "NaiveBayesModel"),
 #' model <- spark.naiveBayes(trainingData, y ~ x)
 #' summary(model)
 #'}
+#' @note summary(NaiveBayesModel) since 2.0.0
 setMethod("summary", signature(object = "NaiveBayesModel"),
           function(object, ...) {
             jobj <- object@jobj
@@ -289,6 +300,7 @@ setMethod("summary", signature(object = "NaiveBayesModel"),
 #' \dontrun{
 #' model <- spark.kmeans(data, ~ ., k=2, initMode="random")
 #' }
+#' @note spark.kmeans since 2.0.0
 setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = 
"formula"),
           function(data, formula, k, maxIter = 10, initMode = c("random", 
"k-means||")) {
             formula <- paste(deparse(formula), collapse = "")
@@ -313,6 +325,7 @@ setMethod("spark.kmeans", signature(data = "SparkDataFrame", formula = "formula"
 #' fitted.model <- fitted(model)
 #' showDF(fitted.model)
 #'}
+#' @note fitted since 2.0.0
 setMethod("fitted", signature(object = "KMeansModel"),
           function(object, method = c("centers", "classes"), ...) {
             method <- match.arg(method)
@@ -339,6 +352,7 @@ setMethod("fitted", signature(object = "KMeansModel"),
 #' model <- spark.kmeans(trainingData, ~ ., 2)
 #' summary(model)
 #' }
+#' @note summary(KMeansModel) since 2.0.0
 setMethod("summary", signature(object = "KMeansModel"),
           function(object, ...) {
             jobj <- object@jobj
@@ -374,6 +388,7 @@ setMethod("summary", signature(object = "KMeansModel"),
 #' predicted <- predict(model, testData)
 #' showDF(predicted)
 #' }
+#' @note predict(KMeansModel) since 2.0.0
 setMethod("predict", signature(object = "KMeansModel"),
           function(object, newData) {
             return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf)))
@@ -396,6 +411,7 @@ setMethod("predict", signature(object = "KMeansModel"),
 #' df <- createDataFrame(infert)
 #' model <- spark.naiveBayes(df, education ~ ., laplace = 0)
 #'}
+#' @note spark.naiveBayes since 2.0.0
 setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = 
"formula"),
     function(data, formula, laplace = 0, ...) {
         formula <- paste(deparse(formula), collapse = "")
@@ -423,6 +439,7 @@ setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "form
 #' path <- "path/to/model"
 #' write.ml(model, path)
 #' }
+#' @note write.ml(NaiveBayesModel, character) since 2.0.0
 setMethod("write.ml", signature(object = "NaiveBayesModel", path = 
"character"),
           function(object, path, overwrite = FALSE) {
             writer <- callJMethod(object@jobj, "write")
@@ -450,6 +467,7 @@ setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"),
 #' path <- "path/to/model"
 #' write.ml(model, path)
 #' }
+#' @note write.ml(AFTSurvivalRegressionModel, character) since 2.0.0
 setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = 
"character"),
           function(object, path, overwrite = FALSE) {
             writer <- callJMethod(object@jobj, "write")
@@ -477,6 +495,7 @@ setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "c
 #' path <- "path/to/model"
 #' write.ml(model, path)
 #' }
+#' @note write.ml(GeneralizedLinearRegressionModel, character) since 2.0.0
 setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", 
path = "character"),
           function(object, path, overwrite = FALSE) {
             writer <- callJMethod(object@jobj, "write")
@@ -504,6 +523,7 @@ setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", pat
 #' path <- "path/to/model"
 #' write.ml(model, path)
 #' }
+#' @note write.ml(KMeansModel, character) since 2.0.0
 setMethod("write.ml", signature(object = "KMeansModel", path = "character"),
           function(object, path, overwrite = FALSE) {
             writer <- callJMethod(object@jobj, "write")
@@ -525,6 +545,7 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"),
 #' path <- "path/to/model"
 #' model <- read.ml(path)
 #' }
+#' @note read.ml since 2.0.0
 read.ml <- function(path) {
   path <- suppressWarnings(normalizePath(path))
   jobj <- callJStatic("org.apache.spark.ml.r.RWrappers", "load", path)
@@ -558,6 +579,7 @@ read.ml <- function(path) {
 #' df <- createDataFrame(ovarian)
 #' model <- spark.survreg(df, Surv(futime, fustat) ~ ecog_ps + rx)
 #' }
+#' @note spark.survreg since 2.0.0
 setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = 
"formula"),
           function(data, formula, ...) {
             formula <- paste(deparse(formula), collapse = "")
@@ -581,6 +603,7 @@ setMethod("spark.survreg", signature(data = "SparkDataFrame", formula = "formula
 #' model <- spark.survreg(trainingData, Surv(futime, fustat) ~ ecog_ps + rx)
 #' summary(model)
 #' }
+#' @note summary(AFTSurvivalRegressionModel) since 2.0.0
 setMethod("summary", signature(object = "AFTSurvivalRegressionModel"),
           function(object, ...) {
             jobj <- object@jobj
@@ -608,6 +631,7 @@ setMethod("summary", signature(object = "AFTSurvivalRegressionModel"),
 #' predicted <- predict(model, testData)
 #' showDF(predicted)
 #' }
+#' @note predict(AFTSurvivalRegressionModel) since 2.0.0
 setMethod("predict", signature(object = "AFTSurvivalRegressionModel"),
           function(object, newData) {
             return(dataFrame(callJMethod(object@jobj, "transform", newData@sdf)))

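The roxygen examples in this file compose end to end. A minimal sketch of the newly annotated entry points, assuming an active SparkR 2.0 session and the built-in iris data (the save path is hypothetical):

df <- createDataFrame(iris)                        # columns become Sepal_Length, ...
model <- spark.glm(df, Sepal_Length ~ Sepal_Width, family = "gaussian")  # since 2.0.0
summary(model)                                     # summary(GeneralizedLinearRegressionModel)
predicted <- predict(model, df)                    # predict(...) since 1.5.0
showDF(predicted)
modelPath <- file.path(tempdir(), "glm-model")     # hypothetical location
write.ml(model, modelPath)                         # write.ml(..., character) since 2.0.0
model2 <- read.ml(modelPath)                       # read.ml since 2.0.0
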
http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/schema.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/schema.R b/R/pkg/R/schema.R
index 89a2cfa..fb23c78 100644
--- a/R/pkg/R/schema.R
+++ b/R/pkg/R/schema.R
@@ -35,6 +35,7 @@
 #'               function(key, x) { y <- data.frame(key, mean(x$b), stringsAsFactors = FALSE) },
 #'               schema)
 #' }
+#' @note structType since 1.4.0
 structType <- function(x, ...) {
   UseMethod("structType", x)
 }
@@ -67,6 +68,7 @@ structType.structField <- function(x, ...) {
 #'
 #' @param x A StructType object
 #' @param ... further arguments passed to or from other methods
+#' @note print.structType since 1.4.0
 print.structType <- function(x, ...) {
   cat("StructType\n",
       sapply(x$fields(),
@@ -98,7 +100,7 @@ print.structType <- function(x, ...) {
 #'               function(key, x) { y <- data.frame(key, mean(x$b), stringsAsFactors = FALSE) },
 #'               schema)
 #' }
-
+#' @note structField since 1.4.0
 structField <- function(x, ...) {
   UseMethod("structField", x)
 }
@@ -202,6 +204,7 @@ structField.character <- function(x, type, nullable = TRUE) {
 #'
 #' @param x A StructField object
 #' @param ... further arguments passed to or from other methods
+#' @note print.structField since 1.4.0
 print.structField <- function(x, ...) {
   cat("StructField(name = \"", x$name(),
       "\", type = \"", x$dataType.toString(),

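For reference, the two schema builders annotated above combine as in the example block; a small sketch, assuming an active SparkR session (field names are illustrative):

schema <- structType(structField("key", "integer"),    # structField since 1.4.0
                     structField("avg", "double"))     # structType since 1.4.0
print(schema)                  # dispatches to print.structType
print(schema$fields()[[1]])    # dispatches to print.structField
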
http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/sparkR.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/sparkR.R b/R/pkg/R/sparkR.R
index d05660c..94d0e63 100644
--- a/R/pkg/R/sparkR.R
+++ b/R/pkg/R/sparkR.R
@@ -31,6 +31,7 @@ connExists <- function(env) {
 #' @rdname sparkR.session.stop
 #' @name sparkR.stop
 #' @export
+#' @note sparkR.stop since 1.4.0
 sparkR.stop <- function() {
   sparkR.session.stop()
 }
@@ -41,7 +42,7 @@ sparkR.stop <- function() {
 #' @rdname sparkR.session.stop
 #' @name sparkR.session.stop
 #' @export
-#' @note since 2.0.0
+#' @note sparkR.session.stop since 2.0.0
 sparkR.session.stop <- function() {
   env <- .sparkREnv
   if (exists(".sparkRCon", envir = env)) {
@@ -112,7 +113,7 @@ sparkR.session.stop <- function() {
 #'                  c("one.jar", "two.jar", "three.jar"),
 #'                  c("com.databricks:spark-avro_2.10:2.0.1"))
 #'}
-
+#' @note sparkR.init since 1.4.0
 sparkR.init <- function(
   master = "",
   appName = "SparkR",
@@ -265,7 +266,7 @@ sparkR.sparkContext <- function(
 #' sc <- sparkR.init()
 #' sqlContext <- sparkRSQL.init(sc)
 #'}
-
+#' @note sparkRSQL.init since 1.4.0
 sparkRSQL.init <- function(jsc = NULL) {
   .Deprecated("sparkR.session")
 
@@ -293,7 +294,7 @@ sparkRSQL.init <- function(jsc = NULL) {
 #' sc <- sparkR.init()
 #' sqlContext <- sparkRHive.init(sc)
 #'}
-
+#' @note sparkRHive.init since 1.4.0
 sparkRHive.init <- function(jsc = NULL) {
   .Deprecated("sparkR.session")
 
@@ -334,8 +335,7 @@ sparkRHive.init <- function(jsc = NULL) {
 #'                c("com.databricks:spark-avro_2.10:2.0.1"))
 #' sparkR.session(spark.master = "yarn-client", spark.executor.memory = "4g")
 #'}
-#' @note since 2.0.0
-
+#' @note sparkR.session since 2.0.0
 sparkR.session <- function(
   master = "",
   appName = "SparkR",
@@ -399,7 +399,7 @@ sparkR.session <- function(
 #' sc <- sparkR.init()
 #' setJobGroup(sc, "myJobGroup", "My job group description", TRUE)
 #'}
-
+#' @note setJobGroup since 1.5.0
 setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
   callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel)
 }
@@ -412,7 +412,7 @@ setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
 #' sc <- sparkR.init()
 #' clearJobGroup(sc)
 #'}
-
+#' @note clearJobGroup since 1.5.0
 clearJobGroup <- function(sc) {
   callJMethod(sc, "clearJobGroup")
 }
@@ -426,7 +426,7 @@ clearJobGroup <- function(sc) {
 #' sc <- sparkR.init()
 #' cancelJobGroup(sc, "myJobGroup")
 #'}
-
+#' @note cancelJobGroup since 1.5.0
 cancelJobGroup <- function(sc, groupId) {
   callJMethod(sc, "cancelJobGroup", groupId)
 }

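The since tags here trace the session lifecycle; a sketch assembled from the roxygen examples above, using the deprecated 1.4.0-era entry point (group name and description are illustrative):

sc <- sparkR.init()            # since 1.4.0; now warns via .Deprecated("sparkR.session")
setJobGroup(sc, "myJobGroup", "My job group description", TRUE)   # since 1.5.0
cancelJobGroup(sc, "myJobGroup")                                  # since 1.5.0
clearJobGroup(sc)                                                 # since 1.5.0
sparkR.stop()                  # since 1.4.0; delegates to sparkR.session.stop (2.0.0)
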
http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/stats.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/stats.R b/R/pkg/R/stats.R
index 6b53517..e92b9e3 100644
--- a/R/pkg/R/stats.R
+++ b/R/pkg/R/stats.R
@@ -40,6 +40,7 @@ setOldClass("jobj")
 #' df <- read.json("/path/to/file.json")
 #' ct <- crosstab(df, "title", "gender")
 #' }
+#' @note crosstab since 1.5.0
 setMethod("crosstab",
           signature(x = "SparkDataFrame", col1 = "character", col2 = 
"character"),
           function(x, col1, col2) {
@@ -65,6 +66,7 @@ setMethod("crosstab",
 #' df <- read.json("/path/to/file.json")
 #' cov <- cov(df, "title", "gender")
 #' }
+#' @note cov since 1.6.0
 setMethod("cov",
           signature(x = "SparkDataFrame"),
           function(x, col1, col2) {
@@ -95,6 +97,7 @@ setMethod("cov",
 #' corr <- corr(df, "title", "gender")
 #' corr <- corr(df, "title", "gender", method = "pearson")
 #' }
+#' @note corr since 1.6.0
 setMethod("corr",
           signature(x = "SparkDataFrame"),
           function(x, col1, col2, method = "pearson") {
@@ -123,6 +126,7 @@ setMethod("corr",
 #' df <- read.json("/path/to/file.json")
 #' fi = freqItems(df, c("title", "gender"))
 #' }
+#' @note freqItems since 1.6.0
 setMethod("freqItems", signature(x = "SparkDataFrame", cols = "character"),
           function(x, cols, support = 0.01) {
             statFunctions <- callJMethod(x@sdf, "stat")
@@ -160,6 +164,7 @@ setMethod("freqItems", signature(x = "SparkDataFrame", cols = "character"),
 #' df <- read.json("/path/to/file.json")
 #' quantiles <- approxQuantile(df, "key", c(0.5, 0.8), 0.0)
 #' }
+#' @note approxQuantile since 2.0.0
 setMethod("approxQuantile",
           signature(x = "SparkDataFrame", col = "character",
                     probabilities = "numeric", relativeError = "numeric"),
@@ -188,6 +193,7 @@ setMethod("approxQuantile",
 #' df <- read.json("/path/to/file.json")
 #' sample <- sampleBy(df, "key", fractions, 36)
 #' }
+#' @note sampleBy since 1.6.0
 setMethod("sampleBy",
           signature(x = "SparkDataFrame", col = "character",
                     fractions = "list", seed = "numeric"),

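A minimal sketch of the annotated stat functions, assuming an active session (the toy data frame and column names are illustrative):

df <- createDataFrame(data.frame(key = c(1, 1, 2, 2, 3),
                                 value = c(10, 11, 20, 21, 30)))
quantiles <- approxQuantile(df, "key", c(0.5, 0.8), 0.0)      # since 2.0.0
fi <- freqItems(df, c("key", "value"), support = 0.4)         # since 1.6.0
covariance <- cov(df, "key", "value")                         # since 1.6.0
correlation <- corr(df, "key", "value", method = "pearson")   # since 1.6.0
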
http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/utils.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/utils.R b/R/pkg/R/utils.R
index aafb344..d5c062d 100644
--- a/R/pkg/R/utils.R
+++ b/R/pkg/R/utils.R
@@ -115,6 +115,7 @@ isRDD <- function(name, env) {
 #' hashCode(1.0) # 1072693248
 #' hashCode("1") # 49
 #'}
+#' @note hashCode since 1.4.0
 hashCode <- function(key) {
   if (class(key) == "integer") {
     as.integer(key[[1]])

http://git-wip-us.apache.org/repos/asf/spark/blob/54aef1c1/R/pkg/R/window.R
----------------------------------------------------------------------
diff --git a/R/pkg/R/window.R b/R/pkg/R/window.R
index 7ecf70a..e4bc933 100644
--- a/R/pkg/R/window.R
+++ b/R/pkg/R/window.R
@@ -32,6 +32,7 @@
 #'   ws <- window.partitionBy(df$key1, df$key2)
 #'   df1 <- select(df, over(lead("value", 1), ws))
 #' }
+#' @note window.partitionBy(character) since 2.0.0
 setMethod("window.partitionBy",
           signature(col = "character"),
           function(col, ...) {
@@ -45,6 +46,7 @@ setMethod("window.partitionBy",
 #' @rdname window.partitionBy
 #' @name window.partitionBy
 #' @export
+#' @note window.partitionBy(Column) since 2.0.0
 setMethod("window.partitionBy",
           signature(col = "Column"),
           function(col, ...) {
@@ -72,6 +74,7 @@ setMethod("window.partitionBy",
 #'   ws <- window.orderBy(df$key1, df$key2)
 #'   df1 <- select(df, over(lead("value", 1), ws))
 #' }
+#' @note window.orderBy(character) since 2.0.0
 setMethod("window.orderBy",
           signature(col = "character"),
           function(col, ...) {
@@ -85,6 +88,7 @@ setMethod("window.orderBy",
 #' @rdname window.orderBy
 #' @name window.orderBy
 #' @export
+#' @note window.orderBy(Column) since 2.0.0
 setMethod("window.orderBy",
           signature(col = "Column"),
           function(col, ...) {

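Both dispatch variants construct the same WindowSpec; a sketch following the roxygen examples above (df and its key1, key2, and value columns are assumed to exist):

ws <- window.partitionBy(df$key1, df$key2)       # Column variant, since 2.0.0
df1 <- select(df, over(lead("value", 1), ws))    # lead() over the partitioned spec
ws2 <- window.orderBy("key1", "key2")            # character variant, since 2.0.0
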
