Repository: spark
Updated Branches:
  refs/heads/master fd711ea13 -> a26e3ed5e


http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 1bc5c3c..d5cc3b3 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -302,7 +302,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
 
   override def processCmd(cmd: String): Int = {
     val cmd_trimmed: String = cmd.trim()
-    val cmd_lower = cmd_trimmed.toLowerCase(Locale.ENGLISH)
+    val cmd_lower = cmd_trimmed.toLowerCase(Locale.ROOT)
     val tokens: Array[String] = cmd_trimmed.split("\\s+")
     val cmd_1: String = cmd_trimmed.substring(tokens(0).length()).trim()
     if (cmd_lower.equals("quit") ||
@@ -310,7 +310,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
       sessionState.close()
       System.exit(0)
     }
-    if (tokens(0).toLowerCase(Locale.ENGLISH).equals("source") ||
+    if (tokens(0).toLowerCase(Locale.ROOT).equals("source") ||
       cmd_trimmed.startsWith("!") || isRemoteMode) {
       val start = System.currentTimeMillis()
       super.processCmd(cmd)
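
A note on the intent of these changes: String.toLowerCase()/toUpperCase() without an explicit locale uses the JVM default locale, so keyword checks like the ones above can behave differently depending on where the JVM runs; Locale.ROOT (rather than the default locale or Locale.ENGLISH) makes the conversion locale-neutral. A minimal standalone sketch of the failure mode, not part of this patch:

import java.util.Locale

object LocaleRootSketch {
  def main(args: Array[String]): Unit = {
    val cmd = "QUIT"
    // Under a Turkish default locale, uppercase 'I' lower-cases to dotless 'ı',
    // so a keyword comparison against "quit" silently fails.
    println(cmd.toLowerCase(new Locale("tr")) == "quit")  // false ("quıt")
    // Locale.ROOT is locale-neutral, so the check behaves the same everywhere.
    println(cmd.toLowerCase(Locale.ROOT) == "quit")       // true
  }
}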

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
index f0e35df..806f2be 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive
 import java.io.IOException
 import java.lang.reflect.InvocationTargetException
 import java.util
+import java.util.Locale
 
 import scala.collection.mutable
 import scala.util.control.NonFatal
@@ -499,7 +500,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat
     // We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable,
     // while `CatalogTable` should be serializable.
     val propsWithoutPath = table.storage.properties.filter {
-      case (k, v) => k.toLowerCase != "path"
+      case (k, v) => k.toLowerCase(Locale.ROOT) != "path"
     }
     table.storage.copy(properties = propsWithoutPath ++ newPath.map("path" -> _))
   }
@@ -1060,7 +1061,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat
     // Hive's metastore is case insensitive. However, Hive's createFunction does
     // not normalize the function name (unlike the getFunction part). So,
     // we are normalizing the function name.
-    val functionName = funcDefinition.identifier.funcName.toLowerCase
+    val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT)
     requireFunctionNotExists(db, functionName)
     val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName)
     client.createFunction(db, funcDefinition.copy(identifier = functionIdentifier))

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala
index 9e3eb2d..c917f11 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.hive
 
+import java.util.Locale
+
 import scala.util.{Failure, Success, Try}
 import scala.util.control.NonFatal
 
@@ -143,7 +145,7 @@ private[sql] class HiveSessionCatalog(
          // This function is not in functionRegistry, let's try to load it as a Hive's
           // built-in function.
           // Hive is case insensitive.
-          val functionName = funcName.unquotedString.toLowerCase
+          val functionName = funcName.unquotedString.toLowerCase(Locale.ROOT)
           if (!hiveFunctions.contains(functionName)) {
             failFunctionLookup(funcName.unquotedString)
           }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
index 0465e9c..09a5eda 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive
 
 import java.io.IOException
+import java.util.Locale
 
 import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.hive.common.StatsSetupConst
@@ -184,14 +185,14 @@ case class RelationConversions(
     conf: SQLConf,
     sessionCatalog: HiveSessionCatalog) extends Rule[LogicalPlan] {
   private def isConvertible(relation: CatalogRelation): Boolean = {
-    (relation.tableMeta.storage.serde.getOrElse("").toLowerCase.contains("parquet") &&
-      conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET)) ||
-      (relation.tableMeta.storage.serde.getOrElse("").toLowerCase.contains("orc") &&
-        conf.getConf(HiveUtils.CONVERT_METASTORE_ORC))
+    val serde = relation.tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)
+    serde.contains("parquet") && conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET) ||
+      serde.contains("orc") && conf.getConf(HiveUtils.CONVERT_METASTORE_ORC)
   }
 
   private def convert(relation: CatalogRelation): LogicalRelation = {
-    if (relation.tableMeta.storage.serde.getOrElse("").toLowerCase.contains("parquet")) {
+    val serde = relation.tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)
+    if (serde.contains("parquet")) {
       val options = Map(ParquetOptions.MERGE_SCHEMA ->
         conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING).toString)
       sessionCatalog.metastoreCatalog

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
index afc2bf8..3de60c7 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
@@ -21,6 +21,7 @@ import java.io.File
 import java.net.{URL, URLClassLoader}
 import java.nio.charset.StandardCharsets
 import java.sql.Timestamp
+import java.util.Locale
 import java.util.concurrent.TimeUnit
 
 import scala.collection.mutable.HashMap
@@ -338,7 +339,7 @@ private[spark] object HiveUtils extends Logging {
               logWarning(s"Hive jar path '$path' does not exist.")
               Nil
             } else {
-              files.filter(_.getName.toLowerCase.endsWith(".jar"))
+              files.filter(_.getName.toLowerCase(Locale.ROOT).endsWith(".jar"))
             }
           case path =>
             new File(path) :: Nil

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 56ccac3..387ec4f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive.client
 
 import java.io.{File, PrintStream}
+import java.util.Locale
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
@@ -153,7 +154,7 @@ private[hive] class HiveClientImpl(
         hadoopConf.iterator().asScala.foreach { entry =>
           val key = entry.getKey
           val value = entry.getValue
-          if (key.toLowerCase.contains("password")) {
+          if (key.toLowerCase(Locale.ROOT).contains("password")) {
             logDebug(s"Applying Hadoop and Hive config to Hive Conf: $key=xxx")
           } else {
             logDebug(s"Applying Hadoop and Hive config to Hive Conf: 
$key=$value")
@@ -168,7 +169,7 @@ private[hive] class HiveClientImpl(
         hiveConf.setClassLoader(initClassLoader)
         // 2: we set all spark confs to this hiveConf.
         sparkConf.getAll.foreach { case (k, v) =>
-          if (k.toLowerCase.contains("password")) {
+          if (k.toLowerCase(Locale.ROOT).contains("password")) {
             logDebug(s"Applying Spark config to Hive Conf: $k=xxx")
           } else {
             logDebug(s"Applying Spark config to Hive Conf: $k=$v")
@@ -177,7 +178,7 @@ private[hive] class HiveClientImpl(
         }
         // 3: we set all entries in config to this hiveConf.
         extraConfig.foreach { case (k, v) =>
-          if (k.toLowerCase.contains("password")) {
+          if (k.toLowerCase(Locale.ROOT).contains("password")) {
             logDebug(s"Applying extra config to HiveConf: $k=xxx")
           } else {
             logDebug(s"Applying extra config to HiveConf: $k=$v")
@@ -622,7 +623,7 @@ private[hive] class HiveClientImpl(
    */
   protected def runHive(cmd: String, maxRows: Int = 1000): Seq[String] = withHiveState {
     logDebug(s"Running hiveql '$cmd'")
-    if (cmd.toLowerCase.startsWith("set")) { logDebug(s"Changing config: $cmd") }
+    if (cmd.toLowerCase(Locale.ROOT).startsWith("set")) { logDebug(s"Changing config: $cmd") }
     try {
       val cmd_trimmed: String = cmd.trim()
       val tokens: Array[String] = cmd_trimmed.split("\\s+")
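
The three logging hunks above apply the same masking pattern. A standalone sketch of that pattern (the helper name is illustrative, not part of the HiveClientImpl API); lower-casing the key with Locale.ROOT keeps the "password" check deterministic regardless of the JVM default locale:

import java.util.Locale

object ConfRedactionSketch {
  // Illustrative helper: mask values whose key looks like a credential before logging.
  def redactedConfLine(key: String, value: String): String =
    if (key.toLowerCase(Locale.ROOT).contains("password")) s"$key=xxx" else s"$key=$value"

  def main(args: Array[String]): Unit = {
    println(redactedConfLine("javax.jdo.option.ConnectionPassword", "secret"))
    // javax.jdo.option.ConnectionPassword=xxx
    println(redactedConfLine("hive.metastore.uris", "thrift://localhost:9083"))
    // hive.metastore.uris=thrift://localhost:9083
  }
}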

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
index 2e35f39..7abb9f0 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.hive.client
 import java.lang.{Boolean => JBoolean, Integer => JInteger, Long => JLong}
 import java.lang.reflect.{InvocationTargetException, Method, Modifier}
 import java.net.URI
-import java.util.{ArrayList => JArrayList, List => JList, Map => JMap, Set => JSet}
+import java.util.{ArrayList => JArrayList, List => JList, Locale, Map => JMap, Set => JSet}
 import java.util.concurrent.TimeUnit
 
 import scala.collection.JavaConverters._
@@ -505,8 +505,8 @@ private[client] class Shim_v0_13 extends Shim_v0_12 {
 
   private def toHiveFunction(f: CatalogFunction, db: String): HiveFunction = {
     val resourceUris = f.resources.map { resource =>
-      new ResourceUri(
-        ResourceType.valueOf(resource.resourceType.resourceType.toUpperCase()), resource.uri)
+      new ResourceUri(ResourceType.valueOf(
+        resource.resourceType.resourceType.toUpperCase(Locale.ROOT)), resource.uri)
     }
     new HiveFunction(
       f.identifier.funcName,

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveOptions.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveOptions.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveOptions.scala
index 1928510..5c51551 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveOptions.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveOptions.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.hive.execution
 
+import java.util.Locale
+
 import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
 
 /**
@@ -29,7 +31,7 @@ class HiveOptions(@transient private val parameters: CaseInsensitiveMap[String])
 
   def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))
 
-  val fileFormat = parameters.get(FILE_FORMAT).map(_.toLowerCase)
+  val fileFormat = parameters.get(FILE_FORMAT).map(_.toLowerCase(Locale.ROOT))
   val inputFormat = parameters.get(INPUT_FORMAT)
   val outputFormat = parameters.get(OUTPUT_FORMAT)
 
@@ -75,7 +77,7 @@ class HiveOptions(@transient private val parameters: CaseInsensitiveMap[String])
   }
 
   def serdeProperties: Map[String, String] = parameters.filterKeys {
-    k => !lowerCasedOptionNames.contains(k.toLowerCase)
+    k => !lowerCasedOptionNames.contains(k.toLowerCase(Locale.ROOT))
   }.map { case (k, v) => delimiterOptions.getOrElse(k, k) -> v }
 }
 
@@ -83,7 +85,7 @@ object HiveOptions {
   private val lowerCasedOptionNames = collection.mutable.Set[String]()
 
   private def newOption(name: String): String = {
-    lowerCasedOptionNames += name.toLowerCase
+    lowerCasedOptionNames += name.toLowerCase(Locale.ROOT)
     name
   }
 
@@ -99,5 +101,5 @@ object HiveOptions {
     // The following typo is inherited from Hive...
     "collectionDelim" -> "colelction.delim",
     "mapkeyDelim" -> "mapkey.delim",
-    "lineDelim" -> "line.delim").map { case (k, v) => k.toLowerCase -> v }
+    "lineDelim" -> "line.delim").map { case (k, v) => 
k.toLowerCase(Locale.ROOT) -> v }
 }
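
The pattern in HiveOptions is worth calling out: option names are registered lower-cased once, and every lookup lower-cases the incoming key the same way. A condensed, standalone sketch of that idea (simplified names, not the real HiveOptions class); matching stays case-insensitive yet locale-independent:

import java.util.Locale

object OptionRegistrySketch {
  private val lowerCasedOptionNames = collection.mutable.Set[String]()

  private def newOption(name: String): String = {
    lowerCasedOptionNames += name.toLowerCase(Locale.ROOT)
    name
  }

  val FILE_FORMAT: String = newOption("fileFormat")

  def isKnownOption(key: String): Boolean =
    lowerCasedOptionNames.contains(key.toLowerCase(Locale.ROOT))

  def main(args: Array[String]): Unit = {
    println(isKnownOption(FILE_FORMAT))    // true
    println(isKnownOption("FILEFORMAT"))   // true, regardless of default locale
    println(isKnownOption("orc.compress")) // false
  }
}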

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcOptions.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcOptions.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcOptions.scala
index ccaa568..043eb69 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcOptions.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcOptions.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.hive.orc
 
+import java.util.Locale
+
 import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
 
 /**
@@ -41,9 +43,9 @@ private[orc] class OrcOptions(@transient private val parameters: CaseInsensitive
     val codecName = parameters
       .get("compression")
       .orElse(orcCompressionConf)
-      .getOrElse("snappy").toLowerCase
+      .getOrElse("snappy").toLowerCase(Locale.ROOT)
     if (!shortOrcCompressionCodecNames.contains(codecName)) {
-      val availableCodecs = shortOrcCompressionCodecNames.keys.map(_.toLowerCase)
+      val availableCodecs = shortOrcCompressionCodecNames.keys.map(_.toLowerCase(Locale.ROOT))
       throw new IllegalArgumentException(s"Codec [$codecName] " +
         s"is not available. Available codecs are ${availableCodecs.mkString(", 
")}.")
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
index 490e02d..59cc660 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDDLCommandSuite.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive
 
 import java.net.URI
+import java.util.Locale
 
 import org.apache.spark.sql.{AnalysisException, SaveMode}
 import org.apache.spark.sql.catalyst.TableIdentifier
@@ -49,7 +50,7 @@ class HiveDDLCommandSuite extends PlanTest with SQLTestUtils with TestHiveSingle
     val e = intercept[ParseException] {
       parser.parsePlan(sql)
     }
-    assert(e.getMessage.toLowerCase.contains("operation not allowed"))
+    assert(e.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed"))
   }
 
   private def analyzeCreateTable(sql: String): CatalogTable = {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
index e48ce23..319d026 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala
@@ -18,18 +18,15 @@
 package org.apache.spark.sql.hive
 
 import java.io.File
-import java.util.concurrent.{Executors, TimeUnit}
 
 import scala.util.Random
 
 import org.scalatest.BeforeAndAfterEach
 
-import org.apache.spark.metrics.source.HiveCatalogMetrics
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.catalog._
 import org.apache.spark.sql.execution.datasources.FileStatusCache
 import org.apache.spark.sql.QueryTest
-import org.apache.spark.sql.hive.client.HiveClient
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
 import org.apache.spark.sql.internal.SQLConf.HiveCaseSensitiveInferenceMode.{Value => InferenceMode, _}

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index e45cf97..abe5d83 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive.execution
 import java.io._
 import java.nio.charset.StandardCharsets
 import java.util
+import java.util.Locale
 
 import scala.util.control.NonFatal
 
@@ -299,10 +300,11 @@ abstract class HiveComparisonTest
         // thus the tables referenced in those DDL commands cannot be extracted for use by our
         // test table auto-loading mechanism. In addition, the tests which use the SHOW TABLES
         // command expect these tables to exist.
-        val hasShowTableCommand = queryList.exists(_.toLowerCase.contains("show tables"))
+        val hasShowTableCommand =
+          queryList.exists(_.toLowerCase(Locale.ROOT).contains("show tables"))
         for (table <- Seq("src", "srcpart")) {
           val hasMatchingQuery = queryList.exists { query =>
-            val normalizedQuery = query.toLowerCase.stripSuffix(";")
+            val normalizedQuery = query.toLowerCase(Locale.ROOT).stripSuffix(";")
             normalizedQuery.endsWith(table) ||
               normalizedQuery.contains(s"from $table") ||
               normalizedQuery.contains(s"from default.$table")
@@ -444,7 +446,7 @@ abstract class HiveComparisonTest
           "create table",
           "drop index"
         )
-        !queryList.map(_.toLowerCase).exists { query =>
+        !queryList.map(_.toLowerCase(Locale.ROOT)).exists { query =>
           excludedSubstrings.exists(s => query.contains(s))
         }
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 65a902f..cf33760 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -80,7 +80,7 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd
 
   private def assertUnsupportedFeature(body: => Unit): Unit = {
     val e = intercept[ParseException] { body }
-    assert(e.getMessage.toLowerCase.contains("operation not allowed"))
+    assert(e.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed"))
   }
 
   // Testing the Broadcast based join for cartesian join (cross join)

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index d012797..75f3744 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.hive.execution
 import java.io.File
 import java.nio.charset.StandardCharsets
 import java.sql.{Date, Timestamp}
+import java.util.Locale
 
 import com.google.common.io.Files
 import org.apache.hadoop.fs.Path
@@ -475,13 +476,13 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
           case None => // OK.
         }
         // Also make sure that the format and serde are as desired.
-        assert(catalogTable.storage.inputFormat.get.toLowerCase.contains(format))
-        assert(catalogTable.storage.outputFormat.get.toLowerCase.contains(format))
+        assert(catalogTable.storage.inputFormat.get.toLowerCase(Locale.ROOT).contains(format))
+        assert(catalogTable.storage.outputFormat.get.toLowerCase(Locale.ROOT).contains(format))
         val serde = catalogTable.storage.serde.get
         format match {
           case "sequence" | "text" => assert(serde.contains("LazySimpleSerDe"))
           case "rcfile" => assert(serde.contains("LazyBinaryColumnarSerDe"))
-          case _ => assert(serde.toLowerCase.contains(format))
+          case _ => assert(serde.toLowerCase(Locale.ROOT).contains(format))
         }
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala
index 9a760e2..931f015 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.streaming.dstream
 
+import java.util.Locale
+
 import scala.reflect.ClassTag
 
 import org.apache.spark.SparkContext
@@ -60,7 +62,7 @@ abstract class InputDStream[T: ClassTag](_ssc: StreamingContext)
       .split("(?=[A-Z])")
       .filter(_.nonEmpty)
       .mkString(" ")
-      .toLowerCase
+      .toLowerCase(Locale.ROOT)
       .capitalize
     s"$newName [$id]"
   }
@@ -74,7 +76,7 @@ abstract class InputDStream[T: ClassTag](_ssc: StreamingContext)
   protected[streaming] override val baseScope: Option[String] = {
     val scopeName = Option(ssc.sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY))
       .map { json => RDDOperationScope.fromJson(json).name + s" [$id]" }
-      .getOrElse(name.toLowerCase)
+      .getOrElse(name.toLowerCase(Locale.ROOT))
     Some(new RDDOperationScope(scopeName).toJson)
   }
 

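For context on the InputDStream hunks: the stream's display name is derived from its class name by splitting on capital letters, lower-casing (now with Locale.ROOT), and re-capitalizing. A rough standalone sketch of just that transformation (the method name and sample class name are assumptions, not the exact InputDStream code):

import java.util.Locale

object StreamNameSketch {
  // Turns a CamelCase class name into a readable label, e.g. "Socket input d stream [0]".
  def prettyName(className: String, id: Int): String = {
    val newName = className
      .split("(?=[A-Z])")
      .filter(_.nonEmpty)
      .mkString(" ")
      .toLowerCase(Locale.ROOT)
      .capitalize
    s"$newName [$id]"
  }

  def main(args: Array[String]): Unit = {
    println(prettyName("SocketInputDStream", 0))  // Socket input d stream [0]
  }
}
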
http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/streaming/src/test/java/test/org/apache/spark/streaming/Java8APISuite.java
----------------------------------------------------------------------
diff --git a/streaming/src/test/java/test/org/apache/spark/streaming/Java8APISuite.java b/streaming/src/test/java/test/org/apache/spark/streaming/Java8APISuite.java
index 80513de..90d1f8c 100644
--- a/streaming/src/test/java/test/org/apache/spark/streaming/Java8APISuite.java
+++ b/streaming/src/test/java/test/org/apache/spark/streaming/Java8APISuite.java
@@ -101,7 +101,7 @@ public class Java8APISuite extends LocalJavaStreamingContext implements Serializ
     JavaDStream<String> mapped = stream.mapPartitions(in -> {
       String out = "";
       while (in.hasNext()) {
-        out = out + in.next().toUpperCase();
+        out = out + in.next().toUpperCase(Locale.ROOT);
       }
       return Arrays.asList(out).iterator();
     });
@@ -806,7 +806,8 @@ public class Java8APISuite extends LocalJavaStreamingContext implements Serializ
       ssc, inputData, 1);
     JavaPairDStream<String, String> pairStream = JavaPairDStream.fromJavaDStream(stream);
 
-    JavaPairDStream<String, String> mapped = pairStream.mapValues(String::toUpperCase);
+    JavaPairDStream<String, String> mapped =
+        pairStream.mapValues(s -> s.toUpperCase(Locale.ROOT));
     JavaTestUtils.attachTestOutputStream(mapped);
     List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
----------------------------------------------------------------------
diff --git a/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
index 96f8d95..6c86cac 100644
--- a/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
@@ -267,7 +267,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
     JavaDStream<String> mapped = stream.mapPartitions(in -> {
         StringBuilder out = new StringBuilder();
         while (in.hasNext()) {
-          out.append(in.next().toUpperCase(Locale.ENGLISH));
+          out.append(in.next().toUpperCase(Locale.ROOT));
         }
         return Arrays.asList(out.toString()).iterator();
       });
@@ -1315,7 +1315,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
     JavaPairDStream<String, String> pairStream = JavaPairDStream.fromJavaDStream(stream);
 
     JavaPairDStream<String, String> mapped =
-      pairStream.mapValues(s -> s.toUpperCase(Locale.ENGLISH));
+      pairStream.mapValues(s -> s.toUpperCase(Locale.ROOT));
 
     JavaTestUtils.attachTestOutputStream(mapped);
     List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
----------------------------------------------------------------------
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
index 5645996..eb996c9 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.streaming
 
 import java.io.{File, NotSerializableException}
+import java.util.Locale
 import java.util.concurrent.{CountDownLatch, TimeUnit}
 import java.util.concurrent.atomic.AtomicInteger
 
@@ -745,7 +746,7 @@ class StreamingContextSuite extends SparkFunSuite with BeforeAndAfter with Timeo
         val ex = intercept[IllegalStateException] {
           body
         }
-        assert(ex.getMessage.toLowerCase().contains(expectedErrorMsg))
+        assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(expectedErrorMsg))
       }
     }
 

