Github user dongjoon-hyun commented on a diff in the pull request:
https://github.com/apache/spark/pull/22486#discussion_r219711018
--- Diff:
sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
---
@@ -318,28 +229,17 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
}
testData.rewind()
-// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
-// STRING Encode:                 Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
-//
---
-// PassThrough(1.000)                     56 /   57       1197.9           0.8       1.0X
-// RunLengthEncoding(0.893)             4892 / 4937         13.7          72.9       0.0X
-// DictionaryEncoding(0.167)            2968 / 2992         22.6          44.2       0.0X
runEncodeBenchmark("STRING Encode", iters, count, STRING, testData)
-
-// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
-// STRING Decode:                 Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
-//
---
-// PassThrough                          2422 / 2449         27.7          36.1       1.0X
-// RunLengthEncoding                    2885 / 3018         23.3          43.0       0.8X
-// DictionaryEncoding                   2716 / 2752         24.7          40.5       0.9X
runDecodeBenchmark("STRING Decode", iters, count, STRING, testData)
}
- def main(args: Array[String]): Unit = {
-bitEncodingBenchmark(1024)
-shortEncodingBenchmark(1024)
-intEncodingBenchmark(1024)
-longEncodingBenchmark(1024)
-stringEncodingBenchmark(1024)
+ override def benchmark(): Unit = {
+runBenchmark("encoding benchmark") {
--- End diff --
How about `Compression Scheme Benchmark`?
---
-
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org