svn commit: r28132 - in /dev/spark/2.3.3-SNAPSHOT-2018_07_15_22_02-dae352a-docs: ./ _site/ _site/api/ _site/api/R/ _site/api/java/ _site/api/java/lib/ _site/api/java/org/ _site/api/java/org/apache/ _s

2018-07-15 Thread pwendell
Author: pwendell
Date: Mon Jul 16 05:16:28 2018
New Revision: 28132

Log:
Apache Spark 2.3.3-SNAPSHOT-2018_07_15_22_02-dae352a docs


[This commit notification would consist of 1443 parts, which exceeds the limit of 50, so it was shortened to the summary.]




spark git commit: [SPARK-24813][TESTS][HIVE][HOTFIX][BRANCH-2.2] HiveExternalCatalogVersionsSuite still flaky; fall back to Apache archive

2018-07-15 Thread gurwls223
Repository: spark
Updated Branches:
  refs/heads/branch-2.2 a8537a5ab -> 4bc4ccd63


[SPARK-24813][TESTS][HIVE][HOTFIX][BRANCH-2.2] HiveExternalCatalogVersionsSuite 
still flaky; fall back to Apache archive

## What changes were proposed in this pull request?

Try only unique ASF mirrors to download the Spark release; fall back to the Apache archive if no mirrors are available or the release is not mirrored.
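
For illustration, a minimal standalone sketch of the mirror-then-archive strategy described above (the `resolveMirror` helper is a hypothetical stand-in for the suite's wget invocation; the authoritative change is the diff below):

```
import scala.util.Try

// Ask the ASF mirror resolver a couple of times; a failed attempt simply drops out.
def resolveMirror(): Option[String] =
  Try(scala.io.Source.fromURL(
    "https://www.apache.org/dyn/closer.lua?preferred=true").mkString.trim).toOption

val mirrors = (0 until 2).flatMap(_ => resolveMirror())
// Deduplicate the mirrors and always keep the archive as the final candidate,
// since archive.apache.org hosts every release.
val sites = mirrors.distinct :+ "https://archive.apache.org/dist"
sites.foreach(site => println(s"$site/spark/spark-2.2.2/spark-2.2.2-bin-hadoop2.7.tgz"))
```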

## How was this patch tested?

Existing HiveExternalCatalogVersionsSuite

Author: Sean Owen 

Closes #21779 from srowen/SPARK-24813.2.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/4bc4ccd6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/4bc4ccd6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/4bc4ccd6

Branch: refs/heads/branch-2.2
Commit: 4bc4ccd63b7d56fba1814505136b0d33517ed8f0
Parents: a8537a5
Author: Sean Owen 
Authored: Mon Jul 16 12:26:45 2018 +0800
Committer: hyukjinkwon 
Committed: Mon Jul 16 12:26:45 2018 +0800

--
 .../hive/HiveExternalCatalogVersionsSuite.scala | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/4bc4ccd6/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
--
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
index 92c27e8..313059b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
@@ -53,12 +53,22 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
   }
 
   private def tryDownloadSpark(version: String, path: String): Unit = {
-    // Try mirrors a few times until one succeeds
-    for (i <- 0 until 3) {
-      val preferredMirror =
-        Seq("wget", "https://www.apache.org/dyn/closer.lua?preferred=true", "-q", "-O", "-").!!.trim
+    // Try a few mirrors first; fall back to Apache archive
+    val mirrors =
+      (0 until 2).flatMap { _ =>
+        try {
+          Some(Seq("wget",
+            "https://www.apache.org/dyn/closer.lua?preferred=true", "-q", "-O", "-").!!.trim)
+        } catch {
+          // If we can't get a mirror URL, skip it. No retry.
+          case _: Exception => None
+        }
+      }
+    val sites = mirrors.distinct :+ "https://archive.apache.org/dist"
+    logInfo(s"Trying to download Spark $version from $sites")
+    for (site <- sites) {
       val filename = s"spark-$version-bin-hadoop2.7.tgz"
-      val url = s"$preferredMirror/spark/spark-$version/$filename"
+      val url = s"$site/spark/spark-$version/$filename"
       logInfo(s"Downloading Spark $version from $url")
       if (Seq("wget", url, "-q", "-P", path).! == 0) {
         val downloaded = new File(sparkTestingDir, filename).getCanonicalPath





spark git commit: [SPARK-24676][SQL] Project required data from CSV parsed data when column pruning disabled

2018-07-15 Thread lixiao
Repository: spark
Updated Branches:
  refs/heads/master bcf7121ed -> d463533de


[SPARK-24676][SQL] Project required data from CSV parsed data when column 
pruning disabled

## What changes were proposed in this pull request?
This PR modifies the code to project the required data from CSV-parsed data when column pruning is disabled.
In the current master, the exception below occurs if `spark.sql.csv.parser.columnPruning.enabled` is false, because the required row format and the CSV-parsed row format differ from each other:
```
./bin/spark-shell --conf spark.sql.csv.parser.columnPruning.enabled=false
scala> val dir = "/tmp/spark-csv/csv"
scala> spark.range(10).selectExpr("id % 2 AS p", "id").write.mode("overwrite").partitionBy("p").csv(dir)
scala> spark.read.csv(dir).selectExpr("sum(p)").collect()
18/06/25 13:48:46 ERROR Executor: Exception in task 2.0 in stage 2.0 (TID 7)
java.lang.ClassCastException: org.apache.spark.unsafe.types.UTF8String cannot be cast to java.lang.Integer
  at scala.runtime.BoxesRunTime.unboxToInt(BoxesRunTime.java:101)
  at org.apache.spark.sql.catalyst.expressions.BaseGenericInternalRow$class.getInt(rows.scala:41)
...
```
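
The fix keeps parsing against the full data schema when pruning is off, then selects the required fields by their index via the `tokenIndexArr` added in the diff below. A rough sketch of that projection idea, with the schemas simplified to plain column-name lists (`project` is a hypothetical helper, not Spark API):

```
val dataSchema = Seq("p", "id")   // all columns present in the CSV files
val requiredSchema = Seq("p")     // columns the query actually needs
// Index of each required column within the full, parsed schema.
val tokenIndexArr = requiredSchema.map(dataSchema.indexOf)

// With pruning disabled every token is parsed; project out the required ones.
def project(parsedTokens: Array[String]): Array[String] =
  tokenIndexArr.map(i => parsedTokens(i)).toArray

println(project(Array("1", "7")).mkString(","))  // prints "1": only `p` survives
```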

## How was this patch tested?
Added tests in `CSVSuite`.

Author: Takeshi Yamamuro 

Closes #21657 from maropu/SPARK-24676.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d463533d
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d463533d
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d463533d

Branch: refs/heads/master
Commit: d463533ded89a05e9f77e590fd3de2ffa212d68b
Parents: bcf7121
Author: Takeshi Yamamuro 
Authored: Sun Jul 15 20:22:09 2018 -0700
Committer: Xiao Li 
Committed: Sun Jul 15 20:22:09 2018 -0700

--
 .../datasources/csv/UnivocityParser.scala   | 54 +++-
 .../execution/datasources/csv/CSVSuite.scala| 29 +++
 2 files changed, 70 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/d463533d/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala
--
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala
index aa545e1..79143cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala
@@ -33,29 +33,49 @@ import org.apache.spark.sql.execution.datasources.FailureSafeParser
 import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.UTF8String
 
+
+/**
+ * Constructs a parser for a given schema that translates CSV data to an [[InternalRow]].
+ *
+ * @param dataSchema The CSV data schema that is specified by the user, or inferred from underlying
+ *                   data files.
+ * @param requiredSchema The schema of the data that should be output for each row. This should be a
+ *                       subset of the columns in dataSchema.
+ * @param options Configuration options for a CSV parser.
+ */
 class UnivocityParser(
     dataSchema: StructType,
     requiredSchema: StructType,
     val options: CSVOptions) extends Logging {
   require(requiredSchema.toSet.subsetOf(dataSchema.toSet),
-    "requiredSchema should be the subset of schema.")
+    s"requiredSchema (${requiredSchema.catalogString}) should be the subset of " +
+      s"dataSchema (${dataSchema.catalogString}).")
 
   def this(schema: StructType, options: CSVOptions) = this(schema, schema, options)
 
   // A `ValueConverter` is responsible for converting the given value to a desired type.
   private type ValueConverter = String => Any
 
+  // This index is used to reorder parsed tokens
+  private val tokenIndexArr =
+    requiredSchema.map(f => java.lang.Integer.valueOf(dataSchema.indexOf(f))).toArray
+
+  // When column pruning is enabled, the parser only parses the required columns based on
+  // their positions in the data schema.
+  private val parsedSchema = if (options.columnPruning) requiredSchema else dataSchema
+
   val tokenizer = {
     val parserSetting = options.asParserSettings
-    if (options.columnPruning && requiredSchema.length < dataSchema.length) {
-      val tokenIndexArr = requiredSchema.map(f => java.lang.Integer.valueOf(dataSchema.indexOf(f)))
+    // When to-be-parsed schema is shorter than the to-be-read data schema, we let Univocity CSV
+    // parser select a sequence of fields for reading by their positions.
+    // if (options.columnPruning && requiredSchema.length < dataSchema.length) {
+    if (parsedSchema.length < dataSchema.length) {

svn commit: r28129 - in /dev/spark/2.4.0-SNAPSHOT-2018_07_15_20_01-bbc2ffc-docs: ./ _site/ _site/api/ _site/api/R/ _site/api/java/ _site/api/java/lib/ _site/api/java/org/ _site/api/java/org/apache/ _s

2018-07-15 Thread pwendell
Author: pwendell
Date: Mon Jul 16 03:15:34 2018
New Revision: 28129

Log:
Apache Spark 2.4.0-SNAPSHOT-2018_07_15_20_01-bbc2ffc docs


[This commit notification would consist of 1467 parts, which exceeds the limit of 50, so it was shortened to the summary.]




spark git commit: [TRIVIAL][ML] GMM unpersist RDD after training

2018-07-15 Thread felixcheung
Repository: spark
Updated Branches:
  refs/heads/master bbc2ffc8a -> bcf7121ed


[TRIVIAL][ML] GMM unpersist RDD after training

## What changes were proposed in this pull request?
unpersist `instances` after training
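
A minimal sketch of the pattern, assuming a local SparkSession (this is not the GaussianMixture code itself): an RDD reused across training iterations is cached up front and released once the loop finishes.

```
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("cache-demo").getOrCreate()
// Cache the training data because the iterative loop below scans it repeatedly.
val instances = spark.sparkContext.parallelize(1 to 1000).cache()

var iter = 0
while (iter < 10) {
  instances.map(_ * 2.0).sum()  // each pass re-reads the cached partitions
  iter += 1
}
// Release the cached blocks once training is done; `false` means non-blocking.
instances.unpersist(false)
```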

## How was this patch tested?
existing tests

Author: 郑瑞峰 

Closes #21562 from zhengruifeng/gmm_unpersist.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/bcf7121e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/bcf7121e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/bcf7121e

Branch: refs/heads/master
Commit: bcf7121ed2283d88424863ac1d35393870eaae6b
Parents: bbc2ffc
Author: 郑瑞峰 
Authored: Sun Jul 15 20:14:17 2018 -0700
Committer: Felix Cheung 
Committed: Sun Jul 15 20:14:17 2018 -0700

--
 .../scala/org/apache/spark/ml/clustering/GaussianMixture.scala| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/bcf7121e/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
--
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
index dae64ba..f0707b3 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
@@ -341,7 +341,7 @@ class GaussianMixture @Since("2.0.0") (
     val sc = dataset.sparkSession.sparkContext
     val numClusters = $(k)
 
-    val instances: RDD[Vector] = dataset
+    val instances = dataset
       .select(DatasetUtils.columnToVector(dataset, getFeaturesCol)).rdd.map {
       case Row(features: Vector) => features
     }.cache()
@@ -416,6 +416,7 @@ class GaussianMixture @Since("2.0.0") (
       iter += 1
     }
 
+    instances.unpersist(false)
     val gaussianDists = gaussians.map { case (mean, covVec) =>
       val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
       new MultivariateGaussian(mean, cov)





spark git commit: [SPARK-24813][TESTS][HIVE][HOTFIX] HiveExternalCatalogVersionsSuite still flaky; fall back to Apache archive

2018-07-15 Thread gurwls223
Repository: spark
Updated Branches:
  refs/heads/branch-2.3 f9a2b0a87 -> dae352a29


[SPARK-24813][TESTS][HIVE][HOTFIX] HiveExternalCatalogVersionsSuite still 
flaky; fall back to Apache archive

## What changes were proposed in this pull request?

Try only unique ASF mirrors to download the Spark release; fall back to the Apache archive if no mirrors are available or the release is not mirrored.
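
The "skip a failed mirror lookup, don't retry it" handling amounts to mapping each attempt to an Option and flattening. A tiny illustration with a hypothetical `attempt` helper:

```
// A failed attempt yields None and is dropped by flatMap; nothing is retried.
def attempt[T](op: => T): Option[T] =
  try Some(op) catch { case _: Exception => None }

val mirrors = (0 until 2).flatMap(_ => attempt[String](sys.error("lookup failed")))
assert(mirrors.isEmpty)  // both lookups failed, so only the archive fallback remains
```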

## How was this patch tested?

Existing HiveExternalCatalogVersionsSuite

Author: Sean Owen 

Closes #21776 from srowen/SPARK-24813.

(cherry picked from commit bbc2ffc8ab27192384def9847c36b873efd87234)
Signed-off-by: hyukjinkwon 


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/dae352a2
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/dae352a2
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/dae352a2

Branch: refs/heads/branch-2.3
Commit: dae352a2929733279911f3d49d8483b20ae28767
Parents: f9a2b0a
Author: Sean Owen 
Authored: Mon Jul 16 09:29:51 2018 +0800
Committer: hyukjinkwon 
Committed: Mon Jul 16 09:30:18 2018 +0800

--
 .../hive/HiveExternalCatalogVersionsSuite.scala | 24 +---
 1 file changed, 16 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/dae352a2/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
--
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
index 5149218..f821268 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
@@ -56,14 +56,21 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
   }
 
   private def tryDownloadSpark(version: String, path: String): Unit = {
-    // Try mirrors a few times until one succeeds
-    for (i <- 0 until 3) {
-      // we don't retry on a failure to get mirror url. If we can't get a mirror url,
-      // the test fails (getStringFromUrl will throw an exception)
-      val preferredMirror =
-        getStringFromUrl("https://www.apache.org/dyn/closer.lua?preferred=true")
+    // Try a few mirrors first; fall back to Apache archive
+    val mirrors =
+      (0 until 2).flatMap { _ =>
+        try {
+          Some(getStringFromUrl("https://www.apache.org/dyn/closer.lua?preferred=true"))
+        } catch {
+          // If we can't get a mirror URL, skip it. No retry.
+          case _: Exception => None
+        }
+      }
+    val sites = mirrors.distinct :+ "https://archive.apache.org/dist"
+    logInfo(s"Trying to download Spark $version from $sites")
+    for (site <- sites) {
       val filename = s"spark-$version-bin-hadoop2.7.tgz"
-      val url = s"$preferredMirror/spark/spark-$version/$filename"
+      val url = s"$site/spark/spark-$version/$filename"
       logInfo(s"Downloading Spark $version from $url")
       try {
         getFileFromUrl(url, path, filename)
@@ -83,7 +90,8 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
           Seq("rm", "-rf", targetDir).!
         }
       } catch {
-        case ex: Exception => logWarning(s"Failed to download Spark $version from $url", ex)
+        case ex: Exception =>
+          logWarning(s"Failed to download Spark $version from $url: ${ex.getMessage}")
       }
     }
     fail(s"Unable to download Spark $version")





spark git commit: [SPARK-24813][TESTS][HIVE][HOTFIX] HiveExternalCatalogVersionsSuite still flaky; fall back to Apache archive

2018-07-15 Thread gurwls223
Repository: spark
Updated Branches:
  refs/heads/master 5d62a985d -> bbc2ffc8a


[SPARK-24813][TESTS][HIVE][HOTFIX] HiveExternalCatalogVersionsSuite still 
flaky; fall back to Apache archive

## What changes were proposed in this pull request?

Try only unique ASF mirrors to download the Spark release; fall back to the Apache archive if no mirrors are available or the release is not mirrored.

## How was this patch tested?

Existing HiveExternalCatalogVersionsSuite

Author: Sean Owen 

Closes #21776 from srowen/SPARK-24813.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/bbc2ffc8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/bbc2ffc8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/bbc2ffc8

Branch: refs/heads/master
Commit: bbc2ffc8ab27192384def9847c36b873efd87234
Parents: 5d62a98
Author: Sean Owen 
Authored: Mon Jul 16 09:29:51 2018 +0800
Committer: hyukjinkwon 
Committed: Mon Jul 16 09:29:51 2018 +0800

--
 .../hive/HiveExternalCatalogVersionsSuite.scala | 24 +---
 1 file changed, 16 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark/blob/bbc2ffc8/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
--
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
index 5149218..f821268 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala
@@ -56,14 +56,21 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
   }
 
   private def tryDownloadSpark(version: String, path: String): Unit = {
-    // Try mirrors a few times until one succeeds
-    for (i <- 0 until 3) {
-      // we don't retry on a failure to get mirror url. If we can't get a mirror url,
-      // the test fails (getStringFromUrl will throw an exception)
-      val preferredMirror =
-        getStringFromUrl("https://www.apache.org/dyn/closer.lua?preferred=true")
+    // Try a few mirrors first; fall back to Apache archive
+    val mirrors =
+      (0 until 2).flatMap { _ =>
+        try {
+          Some(getStringFromUrl("https://www.apache.org/dyn/closer.lua?preferred=true"))
+        } catch {
+          // If we can't get a mirror URL, skip it. No retry.
+          case _: Exception => None
+        }
+      }
+    val sites = mirrors.distinct :+ "https://archive.apache.org/dist"
+    logInfo(s"Trying to download Spark $version from $sites")
+    for (site <- sites) {
       val filename = s"spark-$version-bin-hadoop2.7.tgz"
-      val url = s"$preferredMirror/spark/spark-$version/$filename"
+      val url = s"$site/spark/spark-$version/$filename"
       logInfo(s"Downloading Spark $version from $url")
       try {
         getFileFromUrl(url, path, filename)
@@ -83,7 +90,8 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils {
           Seq("rm", "-rf", targetDir).!
         }
       } catch {
-        case ex: Exception => logWarning(s"Failed to download Spark $version from $url", ex)
+        case ex: Exception =>
+          logWarning(s"Failed to download Spark $version from $url: ${ex.getMessage}")
       }
     }
     fail(s"Unable to download Spark $version")





svn commit: r28123 - in /dev/spark/v2.3.2-rc3-docs: ./ _site/ _site/api/ _site/api/R/ _site/api/java/ _site/api/java/lib/ _site/api/java/org/ _site/api/java/org/apache/ _site/api/java/org/apache/spark

2018-07-15 Thread jshao
Author: jshao
Date: Sun Jul 15 07:30:18 2018
New Revision: 28123

Log:
Apache Spark v2.3.2-rc3 docs


[This commit notification would consist of 1446 parts, which exceeds the limit of 50, so it was shortened to the summary.]




svn commit: r28122 - in /dev/spark/2.4.0-SNAPSHOT-2018_07_15_00_01-6999321-docs: ./ _site/ _site/api/ _site/api/R/ _site/api/java/ _site/api/java/lib/ _site/api/java/org/ _site/api/java/org/apache/ _s

2018-07-15 Thread pwendell
Author: pwendell
Date: Sun Jul 15 07:15:56 2018
New Revision: 28122

Log:
Apache Spark 2.4.0-SNAPSHOT-2018_07_15_00_01-6999321 docs


[This commit notification would consist of 1467 parts, which exceeds the limit of 50, so it was shortened to the summary.]




svn commit: r28121 - in /release/spark: spark-1.6.3/ spark-2.0.2/ spark-2.1.2/ spark-2.2.1/ spark-2.3.0/

2018-07-15 Thread srowen
Author: srowen
Date: Sun Jul 15 06:14:36 2018
New Revision: 28121

Log:
Remove non-current Spark releases 1.6.3, 2.0.2, 2.1.2, 2.2.1, 2.3.0 from dist

Removed:
release/spark/spark-1.6.3/
release/spark/spark-2.0.2/
release/spark/spark-2.1.2/
release/spark/spark-2.2.1/
release/spark/spark-2.3.0/





spark-website git commit: Point to archive site for non-current releases; remove old releases from dropdown

2018-07-15 Thread srowen
Repository: spark-website
Updated Branches:
  refs/heads/asf-site 85c47b705 -> f5d7dfafe


Point to archive site for non-current releases; remove old releases from 
dropdown


Project: http://git-wip-us.apache.org/repos/asf/spark-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark-website/commit/f5d7dfaf
Tree: http://git-wip-us.apache.org/repos/asf/spark-website/tree/f5d7dfaf
Diff: http://git-wip-us.apache.org/repos/asf/spark-website/diff/f5d7dfaf

Branch: refs/heads/asf-site
Commit: f5d7dfafe9061abd656e29aa889b5680c4669131
Parents: 85c47b7
Author: Sean Owen 
Authored: Sun Jul 15 01:07:50 2018 -0500
Committer: Sean Owen 
Committed: Sun Jul 15 01:07:50 2018 -0500

--
 js/downloads.js  | 15 +--
 site/js/downloads.js | 15 +--
 2 files changed, 10 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/spark-website/blob/f5d7dfaf/js/downloads.js
--
diff --git a/js/downloads.js b/js/downloads.js
index 3344267..d5ab599 100644
--- a/js/downloads.js
+++ b/js/downloads.js
@@ -30,22 +30,17 @@ var packagesV7 = [hadoop2p7, hadoop2p6, hadoop2p4, hadoop2p3, hadoopFree, source
 var packagesV8 = [hadoop2p7, hadoop2p6, hadoopFree, sources];
 
 addRelease("2.3.1", new Date("06/08/2018"), packagesV8, true, true);
-addRelease("2.3.0", new Date("02/28/2018"), packagesV8, true, true);
+addRelease("2.3.0", new Date("02/28/2018"), packagesV8, true, false);
 addRelease("2.2.2", new Date("07/02/2018"), packagesV8, true, true);
-addRelease("2.2.1", new Date("12/01/2017"), packagesV8, true, true);
+addRelease("2.2.1", new Date("12/01/2017"), packagesV8, true, false);
 addRelease("2.2.0", new Date("07/11/2017"), packagesV8, true, false);
 addRelease("2.1.3", new Date("06/29/2018"), packagesV7, true, true);
-addRelease("2.1.2", new Date("10/09/2017"), packagesV7, true, true);
+addRelease("2.1.2", new Date("10/09/2017"), packagesV7, true, false);
 addRelease("2.1.1", new Date("05/02/2017"), packagesV7, true, false);
 addRelease("2.1.0", new Date("12/28/2016"), packagesV7, true, false);
-addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true, true);
-addRelease("2.0.1", new Date("10/03/2016"), packagesV7, true, false);
-addRelease("2.0.0", new Date("07/26/2016"), packagesV7, true, false);
+addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true, false);
 //addRelease("2.0.0-preview", new Date("05/24/2016"), sources.concat(packagesV7), true, false);
-addRelease("1.6.3", new Date("11/07/2016"), packagesV6, true, true);
-addRelease("1.6.2", new Date("06/25/2016"), packagesV6, true, false);
-addRelease("1.6.1", new Date("03/09/2016"), packagesV6, true, false);
-addRelease("1.6.0", new Date("01/04/2016"), packagesV6, true, false);
+addRelease("1.6.3", new Date("11/07/2016"), packagesV6, true, false);
 
 function append(el, contents) {
   el.innerHTML += contents;
   el.innerHTML += contents;

http://git-wip-us.apache.org/repos/asf/spark-website/blob/f5d7dfaf/site/js/downloads.js
--
diff --git a/site/js/downloads.js b/site/js/downloads.js
index 3344267..d5ab599 100644
--- a/site/js/downloads.js
+++ b/site/js/downloads.js
@@ -30,22 +30,17 @@ var packagesV7 = [hadoop2p7, hadoop2p6, hadoop2p4, hadoop2p3, hadoopFree, source
 var packagesV8 = [hadoop2p7, hadoop2p6, hadoopFree, sources];
 
 addRelease("2.3.1", new Date("06/08/2018"), packagesV8, true, true);
-addRelease("2.3.0", new Date("02/28/2018"), packagesV8, true, true);
+addRelease("2.3.0", new Date("02/28/2018"), packagesV8, true, false);
 addRelease("2.2.2", new Date("07/02/2018"), packagesV8, true, true);
-addRelease("2.2.1", new Date("12/01/2017"), packagesV8, true, true);
+addRelease("2.2.1", new Date("12/01/2017"), packagesV8, true, false);
 addRelease("2.2.0", new Date("07/11/2017"), packagesV8, true, false);
 addRelease("2.1.3", new Date("06/29/2018"), packagesV7, true, true);
-addRelease("2.1.2", new Date("10/09/2017"), packagesV7, true, true);
+addRelease("2.1.2", new Date("10/09/2017"), packagesV7, true, false);
 addRelease("2.1.1", new Date("05/02/2017"), packagesV7, true, false);
 addRelease("2.1.0", new Date("12/28/2016"), packagesV7, true, false);
-addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true, true);
-addRelease("2.0.1", new Date("10/03/2016"), packagesV7, true, false);
-addRelease("2.0.0", new Date("07/26/2016"), packagesV7, true, false);
+addRelease("2.0.2", new Date("11/14/2016"), packagesV7, true, false);
 //addRelease("2.0.0-preview", new Date("05/24/2016"), sources.concat(packagesV7), true, false);
-addRelease("1.6.3", new Date("11/07/2016"), packagesV6, true, true);
-addRelease("1.6.2", new Date("06/25/2016"), packagesV6, true, false);
-addRelease("1.6.1", new Date("03/09/2016"), packagesV6, true, false);
-addRelease("1.6.0", new Date("01/04/2016"), packagesV6, true, false);
+addRelease("1.6.3", new Date("11/07/2016"), packagesV6, true, false);
 
 function append(el, contents) {
   el.innerHTML += contents;