Repository: spark
Updated Branches:
  refs/heads/master fbaf15050 -> 409d688fb


[SPARK-25864][SQL][TEST] Make main args accessible for BenchmarkBase's subclass

## What changes were proposed in this pull request?

Set the main args correctly in BenchmarkBase so that they are accessible to its
subclasses (a usage sketch follows the list below).
This benefits:
- BuiltInDataSourceWriteBenchmark
- AvroWriteBenchmark
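
As a hedged illustration (the object name, format list, and benchmark body here are hypothetical, not code from this PR), a write benchmark extending the `BenchmarkBase` changed below could use the forwarded args to narrow a run to specific data sources:

```scala
// Hypothetical subclass sketch: mainArgs lets the caller pick which
// formats to benchmark, e.g. by passing "Parquet ORC" on the command line.
object ExampleWriteBenchmark extends BenchmarkBase {
  private val allFormats = Seq("Parquet", "ORC", "JSON", "CSV")

  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
    // With no args, fall back to benchmarking every format.
    val formats = if (mainArgs.isEmpty) allFormats else mainArgs.toSeq
    formats.foreach { format =>
      runBenchmark(s"$format writer") {
        // ... write a fixed number of rows using this format ...
      }
    }
  }
}
```

Anything that launches such an object's main method (e.g. an sbt runMain invocation or spark-submit) can then append format names to restrict the run.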

## How was this patch tested?

Manual tests.

Closes #22872 from yucai/main_args.

Authored-by: yucai <y...@ebay.com>
Signed-off-by: Wenchen Fan <wenc...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/409d688f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/409d688f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/409d688f

Branch: refs/heads/master
Commit: 409d688fb6169ecbc41f6296c6341ae3ed7d1ec8
Parents: fbaf150
Author: yucai <y...@ebay.com>
Authored: Mon Oct 29 20:00:31 2018 +0800
Committer: Wenchen Fan <wenc...@databricks.com>
Committed: Mon Oct 29 20:00:31 2018 +0800

----------------------------------------------------------------------
 .../test/scala/org/apache/spark/benchmark/BenchmarkBase.scala    | 4 ++--
 .../test/scala/org/apache/spark/serializer/KryoBenchmark.scala   | 2 +-
 .../apache/spark/mllib/linalg/UDTSerializationBenchmark.scala    | 2 +-
 .../src/test/scala/org/apache/spark/sql/HashBenchmark.scala      | 2 +-
 .../test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala | 2 +-
 .../scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala   | 2 +-
 .../src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala   | 2 +-
 .../spark/sql/execution/benchmark/AggregateBenchmark.scala       | 2 +-
 .../spark/sql/execution/benchmark/BloomFilterBenchmark.scala     | 2 +-
 .../spark/sql/execution/benchmark/DataSourceReadBenchmark.scala  | 2 +-
 .../spark/sql/execution/benchmark/FilterPushdownBenchmark.scala  | 2 +-
 .../org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala | 2 +-
 .../org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala | 2 +-
 .../spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala  | 2 +-
 .../apache/spark/sql/execution/benchmark/RangeBenchmark.scala    | 2 +-
 .../org/apache/spark/sql/execution/benchmark/SortBenchmark.scala | 2 +-
 .../spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala | 2 +-
 .../spark/sql/execution/benchmark/WideSchemaBenchmark.scala      | 2 +-
 .../columnar/compression/CompressionSchemeBenchmark.scala        | 2 +-
 .../spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala  | 2 +-
 .../execution/benchmark/ObjectHashAggregateExecBenchmark.scala   | 2 +-
 .../scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala   | 2 +-
 22 files changed, 23 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
index 89e927e..24e596e 100644
--- a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
+++ b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
@@ -30,7 +30,7 @@ abstract class BenchmarkBase {
   * Implementations of this method are supposed to use the wrapper method `runBenchmark`
    * for each benchmark scenario.
    */
-  def runBenchmarkSuite(): Unit
+  def runBenchmarkSuite(mainArgs: Array[String]): Unit
 
   final def runBenchmark(benchmarkName: String)(func: => Any): Unit = {
     val separator = "=" * 96
@@ -51,7 +51,7 @@ abstract class BenchmarkBase {
       output = Some(new FileOutputStream(file))
     }
 
-    runBenchmarkSuite()
+    runBenchmarkSuite(args)
 
     output.foreach { o =>
       if (o != null) {
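
The essence of the hunk above, reduced to a self-contained sketch (MiniBenchmarkBase and MiniBenchmark are illustrative names; in BenchmarkBase itself the call site is its main entry point, which is how `args` is in scope): the command-line args are no longer dropped but forwarded to the overridable suite method.

```scala
// Minimal sketch of the pattern: forward main's args to the suite method.
abstract class MiniBenchmarkBase {
  def runBenchmarkSuite(mainArgs: Array[String]): Unit

  def main(args: Array[String]): Unit = {
    // Previously the equivalent call was runBenchmarkSuite(), silently
    // discarding args; now subclasses can act on them.
    runBenchmarkSuite(args)
  }
}

object MiniBenchmark extends MiniBenchmarkBase {
  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
    println(s"received args: ${mainArgs.mkString(", ")}")
  }
}
```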

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
index 8a52c13..d7730f2 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
@@ -39,7 +39,7 @@ import org.apache.spark.serializer.KryoTest._
 object KryoBenchmark extends BenchmarkBase {
 
   val N = 1000000
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val name = "Benchmark Kryo Unsafe vs safe Serialization"
     runBenchmark(name) {
       val benchmark = new Benchmark(name, N, 10, output = output)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
index 6c1d580..5f19e46 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
  */
 object UDTSerializationBenchmark extends BenchmarkBase {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
 
     runBenchmark("VectorUDT de/serialization") {
       val iters = 1e2.toInt

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
index 4226ab3..3b4b80d 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
@@ -102,7 +102,7 @@ object HashBenchmark extends BenchmarkBase {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val singleInt = new StructType().add("i", IntegerType)
     test("single ints", singleInt, 1 << 15, 1 << 14)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
index 7dc865d..dbfa7bb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
@@ -83,7 +83,7 @@ object HashByteArrayBenchmark extends BenchmarkBase {
     benchmark.run()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Benchmark for MurMurHash 3 and xxHash64") {
       test(8, 42L, 1 << 10, 1 << 11)
       test(16, 42L, 1 << 10, 1 << 11)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
index e7a9948..42a4cfc 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
@@ -41,7 +41,7 @@ object UnsafeProjectionBenchmark extends BenchmarkBase {
    (1 to numRows).map(_ => encoder.toRow(generator().asInstanceOf[Row]).copy()).toArray
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("unsafe projection") {
       val iters = 1024 * 16
       val numRows = 1024 * 16

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
index e3df449..dba906f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
@@ -256,7 +256,7 @@ object DatasetBenchmark extends SqlBasedBenchmark {
       .getOrCreate()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val numRows = 100000000
     val numChains = 10
     runBenchmark("Dataset Benchmark") {

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index 86e0df2..b7d2898 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -44,7 +44,7 @@ import org.apache.spark.unsafe.map.BytesToBytesMap
  */
 object AggregateBenchmark extends SqlBasedBenchmark {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("aggregate without grouping") {
       val N = 500L << 22
       codegenBenchmark("agg w/o group", N) {

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
index 2f3caca..f727ebc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
@@ -80,7 +80,7 @@ object BloomFilterBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     writeBenchmark()
     readBenchmark()
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
index a1e7f9e..a1f51f8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
@@ -585,7 +585,7 @@ object DataSourceReadBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("SQL Single Numeric Column Scan") {
      Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach {
         dataType => numericScanBenchmark(1024 * 1024 * 15, dataType)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
index cf05ca3..017b74a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
@@ -198,7 +198,7 @@ object FilterPushdownBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Pushdown for many distinct value case") {
       withTempPath { dir =>
         withTempTable("orcTable", "parquetTable") {

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
index 7bad4cb..ad81711 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
@@ -164,7 +164,7 @@ object JoinBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Join Benchmark") {
       broadcastHashJoinLongKey()
       broadcastHashJoinLongKeyWithDuplicates()

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
index 4338086..c4662c8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
@@ -148,7 +148,7 @@ object MiscBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     filterAndAggregateWithoutGroup(500L << 22)
     limitAndAggregateWithoutGroup(500L << 20)
     sample(500 << 18)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
index 83edf73..8b1c422 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
@@ -36,7 +36,7 @@ object PrimitiveArrayBenchmark extends BenchmarkBase {
     .config("spark.sql.autoBroadcastJoinThreshold", 1)
     .getOrCreate()
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Write primitive arrays in dataset") {
       writeDatasetArray(4)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
index a844e02..a9f873f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
@@ -32,7 +32,7 @@ import org.apache.spark.benchmark.Benchmark
  */
 object RangeBenchmark extends SqlBasedBenchmark {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     import spark.implicits._
 
     runBenchmark("range") {

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
index 9a54e23..784438c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
@@ -119,7 +119,7 @@ object SortBenchmark extends BenchmarkBase {
     benchmark.run()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("radix sort") {
       sortBenchmark()
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
index 79eaeab..f582d84 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
@@ -194,7 +194,7 @@ object UnsafeArrayDataBenchmark extends BenchmarkBase {
     benchmark.run
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Benchmark UnsafeArrayData") {
       readUnsafeArray(10)
       writeUnsafeArray(10)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
index 1246619..f4642e7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
@@ -208,7 +208,7 @@ object WideSchemaBenchmark extends SqlBasedBenchmark {
     deleteTmpFiles()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
 
     runBenchmarkWithDeleteTmpFiles("parsing large select expressions") {
       parsingLargeSelectExpressions()

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index 0f90797..8ea20f2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -233,7 +233,7 @@ object CompressionSchemeBenchmark extends BenchmarkBase with AllCompressionSchem
     runDecodeBenchmark("STRING Decode", iters, count, STRING, testData)
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Compression Scheme Benchmark") {
       bitEncodingBenchmark(1024)
       shortEncodingBenchmark(1024)

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
index f311465..953b3a6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
@@ -443,7 +443,7 @@ object ColumnarBatchBenchmark extends BenchmarkBase {
     benchmark.run
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Int Read/Write") {
       intAccess(1024 * 40)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
index 50ee096..3226e3a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
@@ -212,7 +212,7 @@ object ObjectHashAggregateExecBenchmark extends BenchmarkBase with SQLHelper {
     Column(approxPercentile.toAggregateExpression(isDistinct))
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Hive UDAF vs Spark AF") {
       hiveUDAFvsSparkAF(2 << 15)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/409d688f/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
index 870ad48..ec13288 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
@@ -336,7 +336,7 @@ object OrcReadBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("SQL Single Numeric Column Scan") {
      Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach { dataType =>
         numericScanBenchmark(1024 * 1024 * 15, dataType)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to