This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new e1c6594  [SPARK-34900][TEST][3.0] Make sure benchmarks can run using spark-submit cmd described in the guide
e1c6594 is described below

commit e1c6594d83565e18d4f1eac5a1f21bf677d44ae9
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Wed Mar 31 00:01:22 2021 +0900

    [SPARK-34900][TEST][3.0] Make sure benchmarks can run using spark-submit cmd described in the guide
    
    ### What changes were proposed in this pull request?
    Some of the `spark-submit` commands used to run benchmarks in the user's guide are wrong; the benchmarks cannot be run successfully with these commands.
    
    The major change of this PR is therefore to correct these wrong commands. For example, to run a benchmark which inherits from `SqlBasedBenchmark`, we must specify `--jars <spark core test jar>,<spark catalyst test jar>`, because a `SqlBasedBenchmark`-based benchmark extends both `BenchmarkBase` (defined in the spark core test jar) and `SQLHelper` (defined in the spark catalyst test jar). See the sketch below.
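
    As a concrete sketch (the benchmark class is taken from this PR, but the jar paths and version are hypothetical placeholders; substitute whatever your local build produces):

    ```bash
    # Hypothetical example: jar locations/versions depend on your own build.
    bin/spark-submit \
      --class org.apache.spark.sql.execution.benchmark.JoinBenchmark \
      --jars core/target/spark-core_2.12-3.0.3-SNAPSHOT-tests.jar,sql/catalyst/target/spark-catalyst_2.12-3.0.3-SNAPSHOT-tests.jar \
      sql/core/target/spark-sql_2.12-3.0.3-SNAPSHOT-tests.jar
    ```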
    
    Another change of this PR is to remove the `scalatest` `Assertions` dependency from the benchmarks, because `scalatest-*.jar` is not included in the distribution package and would therefore be troublesome to use.
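
    These commands assume the test jars already exist locally. A minimal sketch of producing them with sbt follows (an assumption on my part: that the project ids are `core`/`catalyst`/`sql` and that `test:package` emits the `*-tests.jar` artifacts, consistent with the `build/sbt "sql/test:runMain ..."` convention the guides already use):

    ```bash
    # Assumption: test:package writes spark-*-tests.jar under each module's target directory.
    build/sbt "core/test:package" "catalyst/test:package" "sql/test:package"
    ```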
    
    ### Why are the changes needed?
    Make sure benchmarks can be run using the `spark-submit` commands described in their guides.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    Ran the benchmarks successfully using the corrected `spark-submit` commands.
    
    Closes #32003 from LuciferYang/SPARK-34900-30.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: HyukjinKwon <gurwls...@apache.org>
---
 .../test/scala/org/apache/spark/MapStatusesSerDeserBenchmark.scala   | 4 +---
 core/src/test/scala/org/apache/spark/rdd/CoalescedRDDBenchmark.scala | 2 +-
 core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala  | 2 +-
 .../scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala  | 2 +-
 .../test/scala/org/apache/spark/util/PropertiesCloneBenchmark.scala  | 4 ++--
 .../scala/org/apache/spark/util/random/XORShiftRandomBenchmark.scala | 2 +-
 .../org/apache/spark/sql/execution/benchmark/AvroReadBenchmark.scala | 2 +-
 .../org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala    | 4 +++-
 .../test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala  | 3 ++-
 sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala  | 4 +++-
 .../apache/spark/sql/execution/benchmark/AggregateBenchmark.scala    | 3 ++-
 .../apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala  | 3 ++-
 .../sql/execution/benchmark/BuiltInDataSourceWriteBenchmark.scala    | 3 ++-
 .../org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala | 3 ++-
 .../spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala      | 3 ++-
 .../org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala  | 3 ++-
 .../spark/sql/execution/benchmark/FilterPushdownBenchmark.scala      | 3 ++-
 .../sql/execution/benchmark/HashedRelationMetricsBenchmark.scala     | 5 ++---
 .../apache/spark/sql/execution/benchmark/InExpressionBenchmark.scala | 4 +++-
 .../org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala     | 5 ++---
 .../apache/spark/sql/execution/benchmark/MakeDateTimeBenchmark.scala | 3 ++-
 .../org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala     | 4 +++-
 .../sql/execution/benchmark/OrcNestedSchemaPruningBenchmark.scala    | 3 ++-
 .../sql/execution/benchmark/OrcV2NestedSchemaPruningBenchmark.scala  | 3 ++-
 .../benchmark/ParquetNestedPredicatePushDownBenchmark.scala          | 3 ++-
 .../execution/benchmark/ParquetNestedSchemaPruningBenchmark.scala    | 3 ++-
 .../spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala      | 4 +++-
 .../org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala    | 3 ++-
 .../org/apache/spark/sql/execution/benchmark/SortBenchmark.scala     | 3 ++-
 .../org/apache/spark/sql/execution/benchmark/UDFBenchmark.scala      | 3 ++-
 .../spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala     | 3 ++-
 .../apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala   | 5 ++---
 .../execution/columnar/compression/CompressionSchemeBenchmark.scala  | 5 +++--
 .../apache/spark/sql/execution/ui/MetricsAggregationBenchmark.scala  | 3 ++-
 .../spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala      | 4 +++-
 .../sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala   | 3 +--
 .../test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala  | 3 +--
 37 files changed, 73 insertions(+), 49 deletions(-)

diff --git a/core/src/test/scala/org/apache/spark/MapStatusesSerDeserBenchmark.scala b/core/src/test/scala/org/apache/spark/MapStatusesSerDeserBenchmark.scala
index 78f1246..e433f42 100644
--- a/core/src/test/scala/org/apache/spark/MapStatusesSerDeserBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/MapStatusesSerDeserBenchmark.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark
 
-import org.scalatest.Assertions._
-
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.benchmark.BenchmarkBase
 import org.apache.spark.scheduler.CompressedMapStatus
@@ -28,7 +26,7 @@ import org.apache.spark.storage.BlockManagerId
  * Benchmark for MapStatuses serialization & deserialization performance.
  * {{{
  *   To run this benchmark:
- *   1. without sbt: bin/spark-submit --class <this class> --jars <core test jar>
+ *   1. without sbt: bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
  *      Results will be written to "benchmarks/MapStatusesSerDeserBenchmark-results.txt".
diff --git a/core/src/test/scala/org/apache/spark/rdd/CoalescedRDDBenchmark.scala b/core/src/test/scala/org/apache/spark/rdd/CoalescedRDDBenchmark.scala
index 617ca5a..f62c561 100644
--- a/core/src/test/scala/org/apache/spark/rdd/CoalescedRDDBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/CoalescedRDDBenchmark.scala
@@ -29,7 +29,7 @@ import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar>
+ *      bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
index 525e682..1c17d7b 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
@@ -31,7 +31,7 @@ import org.apache.spark.serializer.KryoTest._
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar>
+ *      bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala
index dde0c98..7f5dbff 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.ThreadUtils
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar>
+ *      bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
diff --git a/core/src/test/scala/org/apache/spark/util/PropertiesCloneBenchmark.scala b/core/src/test/scala/org/apache/spark/util/PropertiesCloneBenchmark.scala
index baacc75..ff4a494 100644
--- a/core/src/test/scala/org/apache/spark/util/PropertiesCloneBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/util/PropertiesCloneBenchmark.scala
@@ -26,11 +26,11 @@ import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
 
 
 /**
- * Benchmark for Kryo Unsafe vs safe Serialization.
+ * Benchmark for SerializationUtils.clone vs Utils.cloneProperties.
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar>
+ *      bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
diff --git a/core/src/test/scala/org/apache/spark/util/random/XORShiftRandomBenchmark.scala b/core/src/test/scala/org/apache/spark/util/random/XORShiftRandomBenchmark.scala
index b0563fd..7fd63a2 100644
--- a/core/src/test/scala/org/apache/spark/util/random/XORShiftRandomBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/util/random/XORShiftRandomBenchmark.scala
@@ -27,7 +27,7 @@ import org.apache.spark.util.Utils.times
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar>
+ *      bin/spark-submit --class <this class> <spark core test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
diff --git a/external/avro/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroReadBenchmark.scala b/external/avro/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroReadBenchmark.scala
index dc9606f..6157dc4 100644
--- a/external/avro/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroReadBenchmark.scala
+++ b/external/avro/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroReadBenchmark.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.types._
  * {{{
  *   To run this benchmark:
  *   1. without sbt: bin/spark-submit --class <this class>
- *        --jars <catalyst test jar>,<core test jar>,<spark-avro jar> <avro test jar>
+ *        --jars <catalyst test jar>,<core test jar>,<sql test jar>,<spark-avro jar> <avro test jar>
  *   2. build/sbt "avro/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "avro/test:runMain <this class>"
  *      Results will be written to "benchmarks/AvroReadBenchmark-results.txt".
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
index 3caa8f6..e748e32 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
@@ -24,7 +24,9 @@ import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
  * Serialization benchmark for VectorUDT.
  * To run this benchmark:
  * {{{
- * 1. without sbt: bin/spark-submit --class <this class> <spark mllib test jar>
+ * 1. without sbt:
+ *    bin/spark-submit --class <this class>
+ *      --jars <spark core test jar> <spark mllib test jar>
  * 2. build/sbt "mllib/test:runMain <this class>"
  * 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "mllib/test:runMain <this class>"
  *    Results will be written to "benchmarks/UDTSerializationBenchmark-results.txt".
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
index 950e313..352afaa 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
@@ -27,7 +27,8 @@ import org.apache.spark.sql.types._
  * Benchmark `UnsafeProjection` for fixed-length/primitive-type fields.
  * {{{
  *   To run this benchmark:
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class> --jars <spark core test jar> <spark catalyst test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/UnsafeProjectionBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
index e47a6a6..955d0f9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
@@ -28,7 +28,9 @@ import org.apache.spark.sql.types.StringType
  * Benchmark for Dataset typed operations comparing with DataFrame and RDD versions.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/DatasetBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index 965d782..d77ef6e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -36,7 +36,8 @@ import org.apache.spark.unsafe.map.BytesToBytesMap
  * Benchmark to measure performance for aggregate primitives.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt: bin/spark-submit --class <this class>
+ *      --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/AggregateBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
index ae241b3..f78ccf9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
@@ -29,7 +29,8 @@ import org.apache.spark.benchmark.Benchmark
  *
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt: bin/spark-submit --class <this class>
+ *     --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/BloomFilterBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BuiltInDataSourceWriteBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BuiltInDataSourceWriteBenchmark.scala
index 6925bdd..361deb0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BuiltInDataSourceWriteBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BuiltInDataSourceWriteBenchmark.scala
@@ -31,7 +31,8 @@ import org.apache.spark.sql.internal.SQLConf
  *
  *   To measure specified formats, run it with arguments.
  *   1. without sbt:
- *        bin/spark-submit --class <this class> <spark sql test jar> format1 [format2] [...]
+ *        bin/spark-submit --class <this class> --jars <spark core test jar>,
+ *        <spark catalyst test jar> <spark sql test jar> format1 [format2] [...]
  *   2. build/sbt "sql/test:runMain <this class> format1 [format2] [...]"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt
  *        "sql/test:runMain <this class> format1 [format2] [...]"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala
index c7b8737..03670a4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala
@@ -32,7 +32,8 @@ import org.apache.spark.sql.internal.SQLConf
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala
index 7caaa53..1f3e3ce 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala
@@ -37,7 +37,8 @@ object DateTime extends Enumeration {
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala
index 287854d..e2a5a5c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala
@@ -27,7 +27,8 @@ import org.apache.spark.sql.internal.SQLConf
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
index b3f65d4..45e3393 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
@@ -33,7 +33,8 @@ import org.apache.spark.sql.types.{ByteType, Decimal, DecimalType, TimestampType
  * Benchmark to measure read performance with Filter pushdown.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt: bin/spark-submit --class <this class>
+ *      --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/FilterPushdownBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/HashedRelationMetricsBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/HashedRelationMetricsBenchmark.scala
index f3647b3..f03c22a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/HashedRelationMetricsBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/HashedRelationMetricsBenchmark.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.execution.benchmark
 
-import org.scalatest.Assertions._
-
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.internal.config.MEMORY_OFFHEAP_ENABLED
@@ -32,7 +30,8 @@ import org.apache.spark.sql.types.LongType
  * Benchmark to measure metrics performance at HashedRelation.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt: bin/spark-submit --class <this class>
+ *      --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/HashedRelationMetricsBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InExpressionBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InExpressionBenchmark.scala
index caf3387..54f593d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InExpressionBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InExpressionBenchmark.scala
@@ -31,7 +31,9 @@ import org.apache.spark.sql.types._
  *
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/InExpressionBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
index 1cc9289..c20a26e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.execution.benchmark
 
-import org.scalatest.Assertions._
-
 import org.apache.spark.sql.execution.joins._
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.internal.SQLConf
@@ -29,7 +27,8 @@ import org.apache.spark.sql.types.IntegerType
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <spark sql test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MakeDateTimeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MakeDateTimeBenchmark.scala
index c92098c..6ab4fcc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MakeDateTimeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MakeDateTimeBenchmark.scala
@@ -25,7 +25,8 @@ import org.apache.spark.sql.internal.SQLConf
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
index 2aecf55..1860546 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
@@ -24,7 +24,9 @@ import org.apache.spark.sql.internal.SQLConf
  * Benchmark to measure whole stage codegen performance.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/MiscBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcNestedSchemaPruningBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcNestedSchemaPruningBenchmark.scala
index eeb2605..a59da45 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcNestedSchemaPruningBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcNestedSchemaPruningBenchmark.scala
@@ -24,7 +24,8 @@ import org.apache.spark.sql.internal.SQLConf
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcV2NestedSchemaPruningBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcV2NestedSchemaPruningBenchmark.scala
index e735d1c..d0289df 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcV2NestedSchemaPruningBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/OrcV2NestedSchemaPruningBenchmark.scala
@@ -22,7 +22,8 @@ package org.apache.spark.sql.execution.benchmark
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala
index d2bd962..555f63d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala
@@ -27,7 +27,8 @@ import org.apache.spark.sql.internal.SQLConf
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedSchemaPruningBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedSchemaPruningBenchmark.scala
index 1c9cc2c..f8f0ab1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedSchemaPruningBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedSchemaPruningBenchmark.scala
@@ -22,7 +22,8 @@ package org.apache.spark.sql.execution.benchmark
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
index e07921b..b09549b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
@@ -23,7 +23,9 @@ import org.apache.spark.sql.SparkSession
 /**
  * Benchmark primitive arrays via DataFrame and Dataset program using primitive arrays
  * To run this benchmark:
- * 1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ * 1. without sbt:
+ *    bin/spark-submit --class <this class>
+ *      --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  * 2. build/sbt "sql/test:runMain <this class>"
  * 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *    Results will be written to "benchmarks/PrimitiveArrayBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
index e566f5d..e9bdff5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
@@ -24,7 +24,8 @@ import org.apache.spark.benchmark.Benchmark
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <spark sql test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/RangeBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
index 2c9e8a909..8b8710d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
@@ -30,7 +30,8 @@ import org.apache.spark.util.random.XORShiftRandom
  * Benchmark to measure performance for aggregate primitives.
  * {{{
  *   To run this benchmark:
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/<this class>-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UDFBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UDFBenchmark.scala
index ee8a6e7..34e51ef 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UDFBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UDFBenchmark.scala
@@ -28,7 +28,8 @@ import org.apache.spark.sql.types.{IntegerType, StringType}
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <sql core test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result:
  *      SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
index 9b0389c..67eb20c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
@@ -27,7 +27,8 @@ import org.apache.spark.sql.catalyst.expressions.UnsafeArrayData
  * Benchmark [[UnsafeArrayDataBenchmark]] for UnsafeArrayData
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class> --jars <spark core test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/UnsafeArrayDataBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
index 77dc3a1..c209a6d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
@@ -19,8 +19,6 @@ package org.apache.spark.sql.execution.benchmark
 
 import java.io.File
 
-import org.scalatest.Assertions._
-
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.util.Utils
@@ -30,7 +28,8 @@ import org.apache.spark.util.Utils
  * To run this benchmark:
  * {{{
  *   1. without sbt:
- *      bin/spark-submit --class <this class> --jars <spark core test jar> <spark sql test jar>
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/WideSchemaBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index fcb1839..cad7f7a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -22,7 +22,6 @@ import java.nio.charset.StandardCharsets
 
 import org.apache.commons.lang3.RandomStringUtils
 import org.apache.commons.math3.distribution.LogNormalDistribution
-import org.scalatest.Assertions._
 
 import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
 import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
@@ -34,7 +33,9 @@ import org.apache.spark.util.Utils._
  * Benchmark to decoders using various compression schemes.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/CompressionSchemeBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/MetricsAggregationBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/MetricsAggregationBenchmark.scala
index a88abc8..cc209c1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/MetricsAggregationBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/MetricsAggregationBenchmark.scala
@@ -38,7 +38,8 @@ import org.apache.spark.util.kvstore.InMemoryStore
  * Benchmark for metrics aggregation in the SQL listener.
  * {{{
  *   To run this benchmark:
- *   1. without sbt: bin/spark-submit --class <this class> --jars <core test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class> --jars <core test jar> <spark sql test jar>
  *   2. build/sbt "core/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
  *      Results will be written to "benchmarks/MetricsAggregationBenchmark-results.txt".
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
index 953b3a6..f9ae611 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
@@ -32,7 +32,9 @@ import org.apache.spark.util.collection.BitSet
  * Benchmark to low level memory access using different ways to manage buffers.
  * To run this benchmark:
  * {{{
- *   1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
+ *   1. without sbt:
+ *      bin/spark-submit --class <this class>
+ *        --jars <spark core test jar> <spark sql test jar>
  *   2. build/sbt "sql/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
  *      Results will be written to "benchmarks/ColumnarBatchBenchmark-results.txt".
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
index 3f806ad..46a60ef 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
@@ -35,8 +35,7 @@ import org.apache.spark.sql.types.LongType
  * To run this benchmark:
  * {{{
  *   1. without sbt: bin/spark-submit --class <this class>
- *        --jars <spark catalyst test jar>,<spark core test jar>,<spark hive jar>
- *        --packages org.spark-project.hive:hive-exec:1.2.1.spark2
+ *        --jars <spark catalyst test jar>,<spark core test jar>,<spark sql test jar>
  *        <spark hive test jar>
  *   2. build/sbt "hive/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "hive/test:runMain <this class>"
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
index a26412c..6acb03e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
@@ -33,8 +33,7 @@ import org.apache.spark.sql.types._
  * {{{
  *   To run this benchmark:
  *   1. without sbt: bin/spark-submit --class <this class>
- *        --jars <catalyst test jar>,<core test jar>,<sql jar>,<hive-exec jar>,<spark-hive jar>
- *       <spark-hive test jar>
+ *        --jars <catalyst test jar>,<core test jar>,<spark sql test jar> <spark-hive test jar>
  *   2. build/sbt "hive/test:runMain <this class>"
  *   3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "hive/test:runMain <this class>"
  *      Results will be written to "benchmarks/OrcReadBenchmark-results.txt".
