[SPARK-20156][CORE][SQL][STREAMING][MLLIB] Java String toLowerCase "Turkish locale bug" causes Spark problems

## What changes were proposed in this pull request?

Add `Locale.ROOT` to internal calls to String `toLowerCase` and `toUpperCase` to avoid inadvertent locale-sensitive variation in behavior (aka the "Turkish locale problem").

The change looks large but it is just adding `Locale.ROOT` (the locale with no country or language specified) to every call to these methods.
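
For reference, a minimal standalone sketch (not part of this patch; the object name is illustrative) of the behavior the change guards against: under a Turkish default locale, uppercase `I` lowercases to the dotless `ı` (U+0131), so case-insensitive matching of ASCII option names and keywords silently fails unless a fixed locale is passed.

```scala
import java.util.Locale

// Illustrative only: shows why toLowerCase() without an explicit locale misbehaves
// when the JVM default locale is Turkish.
object TurkishLocaleDemo {
  def main(args: Array[String]): Unit = {
    val saved = Locale.getDefault
    try {
      Locale.setDefault(new Locale("tr", "TR"))
      // Locale-sensitive: "TITLE".toLowerCase() yields "tıtle" (dotless ı), so this prints false
      println("TITLE".toLowerCase() == "title")
      // Locale-insensitive: Locale.ROOT always yields "title", so this prints true
      println("TITLE".toLowerCase(Locale.ROOT) == "title")
    } finally {
      Locale.setDefault(saved)  // restore the original default locale
    }
  }
}
```

`Locale.ROOT` gives the same result as `Locale.ENGLISH` for these internal ASCII strings, but makes the intent of a locale-neutral transformation explicit.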

## How was this patch tested?

Existing tests.

Author: Sean Owen <so...@cloudera.com>

Closes #17527 from srowen/SPARK-20156.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/a26e3ed5
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/a26e3ed5
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/a26e3ed5

Branch: refs/heads/master
Commit: a26e3ed5e414d0a350cfe65dd511b154868b9f1d
Parents: fd711ea
Author: Sean Owen <so...@cloudera.com>
Authored: Mon Apr 10 20:11:56 2017 +0100
Committer: Sean Owen <so...@cloudera.com>
Committed: Mon Apr 10 20:11:56 2017 +0100

----------------------------------------------------------------------
 .../apache/spark/network/util/JavaUtils.java    |  5 ++-
 .../spark/network/util/TransportConf.java       |  5 ++-
 .../apache/spark/status/api/v1/TaskSorting.java |  3 +-
 .../scala/org/apache/spark/SparkContext.scala   |  2 +-
 .../main/scala/org/apache/spark/SparkEnv.scala  |  4 +-
 .../executor/CoarseGrainedExecutorBackend.scala |  3 +-
 .../org/apache/spark/io/CompressionCodec.scala  |  4 +-
 .../apache/spark/metrics/sink/ConsoleSink.scala |  4 +-
 .../org/apache/spark/metrics/sink/CsvSink.scala |  2 +-
 .../spark/metrics/sink/GraphiteSink.scala       |  6 +--
 .../apache/spark/metrics/sink/Slf4jSink.scala   |  4 +-
 .../spark/scheduler/EventLoggingListener.scala  |  3 +-
 .../spark/scheduler/SchedulableBuilder.scala    |  5 ++-
 .../spark/scheduler/TaskSchedulerImpl.scala     | 18 ++++-----
 .../spark/serializer/KryoSerializer.scala       |  4 +-
 .../spark/ui/exec/ExecutorThreadDumpPage.scala  |  4 +-
 .../org/apache/spark/ui/jobs/JobPage.scala      |  4 +-
 .../scala/org/apache/spark/ShuffleSuite.scala   |  4 +-
 .../apache/spark/broadcast/BroadcastSuite.scala |  4 +-
 .../internal/config/ConfigEntrySuite.scala      |  3 +-
 .../storage/BlockManagerReplicationSuite.scala  |  6 ++-
 .../org/apache/spark/ui/StagePageSuite.scala    |  5 ++-
 .../org/apache/spark/ui/UISeleniumSuite.scala   |  5 ++-
 .../scala/org/apache/spark/ui/UISuite.scala     | 11 +++---
 .../spark/examples/ml/DecisionTreeExample.scala |  4 +-
 .../apache/spark/examples/ml/GBTExample.scala   |  4 +-
 .../spark/examples/ml/RandomForestExample.scala |  4 +-
 .../spark/examples/mllib/LDAExample.scala       |  4 +-
 .../sql/kafka010/KafkaSourceProvider.scala      | 22 ++++++-----
 .../spark/sql/kafka010/KafkaRelationSuite.scala |  3 +-
 .../spark/sql/kafka010/KafkaSinkSuite.scala     | 24 ++++++------
 .../spark/sql/kafka010/KafkaSourceSuite.scala   |  6 +--
 .../streaming/kafka010/ConsumerStrategy.scala   |  9 +++--
 .../spark/streaming/kafka/KafkaUtils.scala      |  4 +-
 .../ml/classification/LogisticRegression.scala  |  4 +-
 .../org/apache/spark/ml/clustering/LDA.scala    |  5 ++-
 .../r/GeneralizedLinearRegressionWrapper.scala  |  6 ++-
 .../apache/spark/ml/recommendation/ALS.scala    |  9 +++--
 .../GeneralizedLinearRegression.scala           | 38 ++++++++++--------
 .../org/apache/spark/ml/tree/treeParams.scala   | 41 +++++++++++++-------
 .../org/apache/spark/mllib/clustering/LDA.scala |  4 +-
 .../spark/mllib/tree/impurity/Impurity.scala    |  4 +-
 .../main/scala/org/apache/spark/repl/Main.scala |  3 +-
 .../org/apache/spark/deploy/yarn/Client.scala   |  4 +-
 .../sql/catalyst/analysis/ResolveHints.scala    |  4 +-
 .../catalyst/catalog/ExternalCatalogUtils.scala |  7 +++-
 .../sql/catalyst/catalog/SessionCatalog.scala   |  3 +-
 .../catalyst/catalog/functionResources.scala    |  4 +-
 .../sql/catalyst/expressions/Expression.scala   |  4 +-
 .../catalyst/expressions/mathExpressions.scala  |  6 ++-
 .../expressions/regexpExpressions.scala         |  3 +-
 .../expressions/windowExpressions.scala         |  4 +-
 .../spark/sql/catalyst/json/JacksonParser.scala |  5 ++-
 .../spark/sql/catalyst/parser/AstBuilder.scala  | 12 ++++--
 .../spark/sql/catalyst/plans/joinTypes.scala    |  4 +-
 .../streaming/InternalOutputModes.scala         |  4 +-
 .../sql/catalyst/util/CaseInsensitiveMap.scala  |  9 +++--
 .../sql/catalyst/util/CompressionCodecs.scala   |  4 +-
 .../spark/sql/catalyst/util/DateTimeUtils.scala |  4 +-
 .../spark/sql/catalyst/util/ParseMode.scala     |  4 +-
 .../sql/catalyst/util/StringKeyHashMap.scala    |  4 +-
 .../org/apache/spark/sql/internal/SQLConf.scala |  6 +--
 .../org/apache/spark/sql/types/DataType.scala   |  8 +++-
 .../apache/spark/sql/types/DecimalType.scala    |  4 +-
 .../sql/streaming/JavaOutputModeSuite.java      |  6 ++-
 .../sql/catalyst/analysis/AnalysisTest.scala    |  5 ++-
 .../analysis/UnsupportedOperationsSuite.scala   |  7 ++--
 .../catalyst/expressions/ScalaUDFSuite.scala    |  4 +-
 .../streaming/InternalOutputModesSuite.scala    |  4 +-
 .../apache/spark/sql/DataFrameNaFunctions.scala |  3 +-
 .../org/apache/spark/sql/DataFrameReader.scala  |  4 +-
 .../org/apache/spark/sql/DataFrameWriter.scala  |  6 +--
 .../spark/sql/RelationalGroupedDataset.scala    |  4 +-
 .../org/apache/spark/sql/api/r/SQLUtils.scala   | 24 ++++++------
 .../spark/sql/execution/SparkSqlParser.scala    | 20 +++++-----
 .../sql/execution/WholeStageCodegenExec.scala   |  6 ++-
 .../spark/sql/execution/command/ddl.scala       |  6 ++-
 .../spark/sql/execution/command/functions.scala |  4 +-
 .../sql/execution/datasources/DataSource.scala  | 16 ++++----
 .../datasources/InMemoryFileIndex.scala         |  1 -
 .../datasources/PartitioningUtils.scala         |  4 +-
 .../execution/datasources/csv/CSVOptions.scala  |  4 +-
 .../spark/sql/execution/datasources/ddl.scala   |  4 +-
 .../datasources/jdbc/JDBCOptions.scala          |  6 +--
 .../execution/datasources/jdbc/JdbcUtils.scala  |  3 +-
 .../datasources/parquet/ParquetOptions.scala    |  8 +++-
 .../spark/sql/execution/datasources/rules.scala |  5 ++-
 .../state/HDFSBackedStateStoreProvider.scala    |  3 +-
 .../apache/spark/sql/internal/HiveSerDe.scala   |  4 +-
 .../apache/spark/sql/internal/SharedState.scala |  1 -
 .../spark/sql/streaming/DataStreamReader.scala  |  4 +-
 .../spark/sql/streaming/DataStreamWriter.scala  |  4 +-
 .../org/apache/spark/sql/JavaDatasetSuite.java  |  2 +-
 .../apache/spark/sql/SQLQueryTestSuite.scala    |  3 +-
 .../sql/execution/QueryExecutionSuite.scala     | 13 ++++---
 .../sql/execution/command/DDLCommandSuite.scala |  7 +++-
 .../spark/sql/execution/command/DDLSuite.scala  |  5 ++-
 .../datasources/parquet/ParquetIOSuite.scala    |  4 +-
 .../ParquetPartitionDiscoverySuite.scala        |  8 ++--
 .../spark/sql/sources/FilteredScanSuite.scala   |  9 +++--
 .../sql/streaming/FileStreamSinkSuite.scala     |  4 +-
 .../streaming/StreamingAggregationSuite.scala   |  4 +-
 .../test/DataStreamReaderWriterSuite.scala      |  7 ++--
 .../sql/test/DataFrameReaderWriterSuite.scala   | 10 +++--
 .../hive/service/auth/HiveAuthFactory.java      |  5 ++-
 .../org/apache/hive/service/auth/SaslQOP.java   |  3 +-
 .../java/org/apache/hive/service/cli/Type.java  |  3 +-
 .../hive/thriftserver/HiveThriftServer2.scala   |  2 +-
 .../hive/thriftserver/SparkSQLCLIDriver.scala   |  4 +-
 .../spark/sql/hive/HiveExternalCatalog.scala    |  5 ++-
 .../spark/sql/hive/HiveSessionCatalog.scala     |  4 +-
 .../apache/spark/sql/hive/HiveStrategies.scala  | 11 +++---
 .../org/apache/spark/sql/hive/HiveUtils.scala   |  3 +-
 .../spark/sql/hive/client/HiveClientImpl.scala  |  9 +++--
 .../apache/spark/sql/hive/client/HiveShim.scala |  6 +--
 .../spark/sql/hive/execution/HiveOptions.scala  | 10 +++--
 .../apache/spark/sql/hive/orc/OrcOptions.scala  |  6 ++-
 .../spark/sql/hive/HiveDDLCommandSuite.scala    |  3 +-
 .../sql/hive/HiveSchemaInferenceSuite.scala     |  3 --
 .../sql/hive/execution/HiveComparisonTest.scala |  8 ++--
 .../sql/hive/execution/HiveQuerySuite.scala     |  2 +-
 .../sql/hive/execution/SQLQuerySuite.scala      |  7 ++--
 .../spark/streaming/dstream/InputDStream.scala  |  6 ++-
 .../apache/spark/streaming/Java8APISuite.java   |  5 ++-
 .../apache/spark/streaming/JavaAPISuite.java    |  4 +-
 .../spark/streaming/StreamingContextSuite.scala |  3 +-
 126 files changed, 482 insertions(+), 299 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java
----------------------------------------------------------------------
diff --git 
a/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java
 
b/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java
index 51d7fda..afc59ef 100644
--- 
a/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java
+++ 
b/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 import java.nio.charset.StandardCharsets;
+import java.util.Locale;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -210,7 +211,7 @@ public class JavaUtils {
   * The unit is also considered the default if the given string does not specify a unit.
    */
   public static long timeStringAs(String str, TimeUnit unit) {
-    String lower = str.toLowerCase().trim();
+    String lower = str.toLowerCase(Locale.ROOT).trim();
 
     try {
       Matcher m = Pattern.compile("(-?[0-9]+)([a-z]+)?").matcher(lower);
@@ -258,7 +259,7 @@ public class JavaUtils {
    * provided, a direct conversion to the provided unit is attempted.
    */
   public static long byteStringAs(String str, ByteUnit unit) {
-    String lower = str.toLowerCase().trim();
+    String lower = str.toLowerCase(Locale.ROOT).trim();
 
     try {
       Matcher m = Pattern.compile("([0-9]+)([a-z]+)?").matcher(lower);

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
----------------------------------------------------------------------
diff --git 
a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
 
b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
index c226d8f..a25078e 100644
--- 
a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ 
b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -17,6 +17,7 @@
 
 package org.apache.spark.network.util;
 
+import java.util.Locale;
 import java.util.Properties;
 
 import com.google.common.primitives.Ints;
@@ -75,7 +76,9 @@ public class TransportConf {
   }
 
   /** IO mode: nio or epoll */
-  public String ioMode() { return conf.get(SPARK_NETWORK_IO_MODE_KEY, "NIO").toUpperCase(); }
+  public String ioMode() {
+    return conf.get(SPARK_NETWORK_IO_MODE_KEY, "NIO").toUpperCase(Locale.ROOT);
+  }
 
   /** If true, we will prefer allocating off-heap byte buffers within Netty. */
   public boolean preferDirectBufs() {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java 
b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
index b38639e..dff4f5d 100644
--- a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
+++ b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
@@ -21,6 +21,7 @@ import org.apache.spark.util.EnumUtil;
 
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Locale;
 import java.util.Set;
 
 public enum TaskSorting {
@@ -35,7 +36,7 @@ public enum TaskSorting {
   }
 
   public static TaskSorting fromString(String str) {
-    String lower = str.toLowerCase();
+    String lower = str.toLowerCase(Locale.ROOT);
     for (TaskSorting t: values()) {
       if (t.alternateNames.contains(lower)) {
         return t;

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/SparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala 
b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 0225fd6..99efc48 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -361,7 +361,7 @@ class SparkContext(config: SparkConf) extends Logging {
    */
   def setLogLevel(logLevel: String) {
     // let's allow lowercase or mixed case too
-    val upperCased = logLevel.toUpperCase(Locale.ENGLISH)
+    val upperCased = logLevel.toUpperCase(Locale.ROOT)
     require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
       s"Supplied level $logLevel did not match one of:" +
         s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/SparkEnv.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala 
b/core/src/main/scala/org/apache/spark/SparkEnv.scala
index 539dbb5..f4a59f0 100644
--- a/core/src/main/scala/org/apache/spark/SparkEnv.scala
+++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -19,6 +19,7 @@ package org.apache.spark
 
 import java.io.File
 import java.net.Socket
+import java.util.Locale
 
 import scala.collection.mutable
 import scala.util.Properties
@@ -319,7 +320,8 @@ object SparkEnv extends Logging {
       "sort" -> 
classOf[org.apache.spark.shuffle.sort.SortShuffleManager].getName,
       "tungsten-sort" -> 
classOf[org.apache.spark.shuffle.sort.SortShuffleManager].getName)
     val shuffleMgrName = conf.get("spark.shuffle.manager", "sort")
-    val shuffleMgrClass = 
shortShuffleMgrNames.getOrElse(shuffleMgrName.toLowerCase, shuffleMgrName)
+    val shuffleMgrClass =
+      shortShuffleMgrNames.getOrElse(shuffleMgrName.toLowerCase(Locale.ROOT), 
shuffleMgrName)
     val shuffleManager = instantiateClass[ShuffleManager](shuffleMgrClass)
 
     val useLegacyMemoryManager = conf.getBoolean("spark.memory.useLegacyMode", false)

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index ba0096d..b2b26ee 100644
--- 
a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ 
b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -19,6 +19,7 @@ package org.apache.spark.executor
 
 import java.net.URL
 import java.nio.ByteBuffer
+import java.util.Locale
 import java.util.concurrent.atomic.AtomicBoolean
 
 import scala.collection.mutable
@@ -72,7 +73,7 @@ private[spark] class CoarseGrainedExecutorBackend(
   def extractLogUrls: Map[String, String] = {
     val prefix = "SPARK_LOG_URL_"
     sys.env.filterKeys(_.startsWith(prefix))
-      .map(e => (e._1.substring(prefix.length).toLowerCase, e._2))
+      .map(e => (e._1.substring(prefix.length).toLowerCase(Locale.ROOT), e._2))
   }
 
   override def receive: PartialFunction[Any, Unit] = {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala 
b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
index c216fe4..0cb16f0 100644
--- a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
+++ b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.io
 
 import java.io._
+import java.util.Locale
 
 import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream}
 import net.jpountz.lz4.LZ4BlockOutputStream
@@ -66,7 +67,8 @@ private[spark] object CompressionCodec {
   }
 
   def createCodec(conf: SparkConf, codecName: String): CompressionCodec = {
-    val codecClass = shortCompressionCodecNames.getOrElse(codecName.toLowerCase, codecName)
+    val codecClass =
+      shortCompressionCodecNames.getOrElse(codecName.toLowerCase(Locale.ROOT), codecName)
     val codec = try {
       val ctor = Utils.classForName(codecClass).getConstructor(classOf[SparkConf])
       Some(ctor.newInstance(conf).asInstanceOf[CompressionCodec])

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala 
b/core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala
index 81b9056..fce556f 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.metrics.sink
 
-import java.util.Properties
+import java.util.{Locale, Properties}
 import java.util.concurrent.TimeUnit
 
 import com.codahale.metrics.{ConsoleReporter, MetricRegistry}
@@ -39,7 +39,7 @@ private[spark] class ConsoleSink(val property: Properties, val registry: MetricR
   }
 
   val pollUnit: TimeUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
-    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
+    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
     case None => TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala 
b/core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala
index 9d5f2ae..88bba2f 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala
@@ -42,7 +42,7 @@ private[spark] class CsvSink(val property: Properties, val registry: MetricRegis
   }
 
   val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
-    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
+    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
     case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala 
b/core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala
index 22454e5..23e3182 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/GraphiteSink.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.metrics.sink
 
 import java.net.InetSocketAddress
-import java.util.Properties
+import java.util.{Locale, Properties}
 import java.util.concurrent.TimeUnit
 
 import com.codahale.metrics.MetricRegistry
@@ -59,7 +59,7 @@ private[spark] class GraphiteSink(val property: Properties, val registry: Metric
   }
 
   val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
-    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
+    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
     case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
   }
 
@@ -67,7 +67,7 @@ private[spark] class GraphiteSink(val property: Properties, val registry: Metric
 
   MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)
 
-  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
+  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase(Locale.ROOT)) match {
     case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
     case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
     case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala 
b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
index 773e074..7fa4ba7 100644
--- a/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
+++ b/core/src/main/scala/org/apache/spark/metrics/sink/Slf4jSink.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.metrics.sink
 
-import java.util.Properties
+import java.util.{Locale, Properties}
 import java.util.concurrent.TimeUnit
 
 import com.codahale.metrics.{MetricRegistry, Slf4jReporter}
@@ -42,7 +42,7 @@ private[spark] class Slf4jSink(
   }
 
   val pollUnit: TimeUnit = Option(property.getProperty(SLF4J_KEY_UNIT)) match {
-    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
+    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
     case None => TimeUnit.valueOf(SLF4J_DEFAULT_UNIT)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala 
b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
index af9bdef..aecb3a9 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
@@ -20,6 +20,7 @@ package org.apache.spark.scheduler
 import java.io._
 import java.net.URI
 import java.nio.charset.StandardCharsets
+import java.util.Locale
 
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
@@ -316,7 +317,7 @@ private[spark] object EventLoggingListener extends Logging {
   }
 
   private def sanitize(str: String): String = {
-    str.replaceAll("[ :/]", "-").replaceAll("[.${}'\"]", "_").toLowerCase
+    str.replaceAll("[ :/]", "-").replaceAll("[.${}'\"]", "_").toLowerCase(Locale.ROOT)
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala 
b/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
index 20cedaf..4171034 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/SchedulableBuilder.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.scheduler
 
 import java.io.{FileInputStream, InputStream}
-import java.util.{NoSuchElementException, Properties}
+import java.util.{Locale, NoSuchElementException, Properties}
 
 import scala.util.control.NonFatal
 import scala.xml.{Node, XML}
@@ -142,7 +142,8 @@ private[spark] class FairSchedulableBuilder(val rootPool: Pool, conf: SparkConf)
       defaultValue: SchedulingMode,
       fileName: String): SchedulingMode = {
 
-    val xmlSchedulingMode = (poolNode \ SCHEDULING_MODE_PROPERTY).text.trim.toUpperCase
+    val xmlSchedulingMode =
+      (poolNode \ SCHEDULING_MODE_PROPERTY).text.trim.toUpperCase(Locale.ROOT)
     val warningMessage = s"Unsupported schedulingMode: $xmlSchedulingMode found in " +
       s"Fair Scheduler configuration file: $fileName, using " +
       s"the default schedulingMode: $defaultValue for pool: $poolName"

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala 
b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 07aea77..c849a16 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.scheduler
 
 import java.nio.ByteBuffer
-import java.util.{Timer, TimerTask}
+import java.util.{Locale, Timer, TimerTask}
 import java.util.concurrent.TimeUnit
 import java.util.concurrent.atomic.AtomicLong
 
@@ -56,8 +56,7 @@ private[spark] class TaskSchedulerImpl private[scheduler](
     val maxTaskFailures: Int,
     private[scheduler] val blacklistTrackerOpt: Option[BlacklistTracker],
     isLocal: Boolean = false)
-  extends TaskScheduler with Logging
-{
+  extends TaskScheduler with Logging {
 
   import TaskSchedulerImpl._
 
@@ -135,12 +134,13 @@ private[spark] class TaskSchedulerImpl private[scheduler](
   private var schedulableBuilder: SchedulableBuilder = null
   // default scheduler is FIFO
   private val schedulingModeConf = conf.get(SCHEDULER_MODE_PROPERTY, SchedulingMode.FIFO.toString)
-  val schedulingMode: SchedulingMode = try {
-    SchedulingMode.withName(schedulingModeConf.toUpperCase)
-  } catch {
-    case e: java.util.NoSuchElementException =>
-      throw new SparkException(s"Unrecognized $SCHEDULER_MODE_PROPERTY: $schedulingModeConf")
-  }
+  val schedulingMode: SchedulingMode =
+    try {
+      SchedulingMode.withName(schedulingModeConf.toUpperCase(Locale.ROOT))
+    } catch {
+      case e: java.util.NoSuchElementException =>
+        throw new SparkException(s"Unrecognized $SCHEDULER_MODE_PROPERTY: $schedulingModeConf")
+    }
 
   val rootPool: Pool = new Pool("", schedulingMode, 0, 0)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala 
b/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
index 6fc66e2..e15166d 100644
--- a/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/KryoSerializer.scala
@@ -19,6 +19,7 @@ package org.apache.spark.serializer
 
 import java.io._
 import java.nio.ByteBuffer
+import java.util.Locale
 import javax.annotation.Nullable
 
 import scala.collection.JavaConverters._
@@ -244,7 +245,8 @@ class KryoDeserializationStream(
       kryo.readClassAndObject(input).asInstanceOf[T]
     } catch {
       // DeserializationStream uses the EOF exception to indicate stopping condition.
-      case e: KryoException if e.getMessage.toLowerCase.contains("buffer underflow") =>
+      case e: KryoException
+        if e.getMessage.toLowerCase(Locale.ROOT).contains("buffer underflow") =>
         throw new EOFException
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala
----------------------------------------------------------------------
diff --git 
a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala 
b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala
index dbcc640..6ce3f51 100644
--- a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.ui.exec
 
+import java.util.Locale
 import javax.servlet.http.HttpServletRequest
 
 import scala.xml.{Node, Text}
@@ -42,7 +43,8 @@ private[ui] class ExecutorThreadDumpPage(parent: ExecutorsTab) extends WebUIPage
           val v1 = if (threadTrace1.threadName.contains("Executor task launch")) 1 else 0
           val v2 = if (threadTrace2.threadName.contains("Executor task launch")) 1 else 0
           if (v1 == v2) {
-            threadTrace1.threadName.toLowerCase < threadTrace2.threadName.toLowerCase
+            threadTrace1.threadName.toLowerCase(Locale.ROOT) <
+              threadTrace2.threadName.toLowerCase(Locale.ROOT)
           } else {
             v1 > v2
           }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala 
b/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala
index 0ff9e5e9..3131c4a 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.ui.jobs
 
-import java.util.Date
+import java.util.{Date, Locale}
 import javax.servlet.http.HttpServletRequest
 
 import scala.collection.mutable.{Buffer, ListBuffer}
@@ -77,7 +77,7 @@ private[ui] class JobPage(parent: JobsTab) extends WebUIPage("job") {
          |  'content': '<div class="job-timeline-content" data-toggle="tooltip"' +
          |   'data-placement="top" data-html="true"' +
          |   'data-title="${jsEscapedName} (Stage ${stageId}.${attemptId})<br>' +
-         |   'Status: ${status.toUpperCase}<br>' +
+         |   'Status: ${status.toUpperCase(Locale.ROOT)}<br>' +
          |   'Submitted: ${UIUtils.formatDate(new Date(submissionTime))}' +
          |   '${
                  if (status != "running") {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ShuffleSuite.scala 
b/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
index e626ed3..58b8659 100644
--- a/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ShuffleSuite.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark
 
-import java.util.Properties
+import java.util.{Locale, Properties}
 import java.util.concurrent.{Callable, CyclicBarrier, Executors, ExecutorService}
 
 import org.scalatest.Matchers
@@ -239,7 +239,7 @@ abstract class ShuffleSuite extends SparkFunSuite with Matchers with LocalSparkC
     }
 
     assert(thrown.getClass === classOf[SparkException])
-    assert(thrown.getMessage.toLowerCase.contains("serializable"))
+    assert(thrown.getMessage.toLowerCase(Locale.ROOT).contains("serializable"))
   }
 
   test("shuffle with different compression settings (SPARK-3426)") {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
----------------------------------------------------------------------
diff --git 
a/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala 
b/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
index 82760fe..46f9ac6 100644
--- a/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
+++ b/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.broadcast
 
+import java.util.Locale
+
 import scala.util.Random
 
 import org.scalatest.Assertions
@@ -130,7 +132,7 @@ class BroadcastSuite extends SparkFunSuite with LocalSparkContext with Encryptio
     val thrown = intercept[IllegalStateException] {
       sc.broadcast(Seq(1, 2, 3))
     }
-    assert(thrown.getMessage.toLowerCase.contains("stopped"))
+    assert(thrown.getMessage.toLowerCase(Locale.ROOT).contains("stopped"))
   }
 
   test("Forbid broadcasting RDD directly") {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
----------------------------------------------------------------------
diff --git 
a/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala 
b/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
index e2ba0d2..b72cd8b 100644
--- 
a/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.internal.config
 
+import java.util.Locale
 import java.util.concurrent.TimeUnit
 
 import org.apache.spark.{SparkConf, SparkFunSuite}
@@ -132,7 +133,7 @@ class ConfigEntrySuite extends SparkFunSuite {
     val conf = new SparkConf()
     val transformationConf = ConfigBuilder(testKey("transformation"))
       .stringConf
-      .transform(_.toLowerCase())
+      .transform(_.toLowerCase(Locale.ROOT))
       .createWithDefault("FOO")
 
     assert(conf.get(transformationConf) === "foo")

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala
----------------------------------------------------------------------
diff --git 
a/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala
 
b/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala
index 13020ac..c100803 100644
--- 
a/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala
+++ 
b/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.storage
 
+import java.util.Locale
+
 import scala.collection.mutable.ArrayBuffer
 import scala.concurrent.duration._
 import scala.language.implicitConversions
@@ -374,8 +376,8 @@ trait BlockManagerReplicationBehavior extends SparkFunSuite
 
     storageLevels.foreach { storageLevel =>
       // Put the block into one of the stores
-      val blockId = new TestBlockId(
-        "block-with-" + storageLevel.description.replace(" ", "-").toLowerCase)
+      val blockId = TestBlockId(
+        "block-with-" + storageLevel.description.replace(" ", 
"-").toLowerCase(Locale.ROOT))
       val testValue = Array.fill[Byte](blockSize)(1)
       stores(0).putSingle(blockId, testValue, storageLevel)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala 
b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
index 38030e0..499d47b 100644
--- a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.ui
 
+import java.util.Locale
 import javax.servlet.http.HttpServletRequest
 
 import scala.xml.Node
@@ -37,14 +38,14 @@ class StagePageSuite extends SparkFunSuite with LocalSparkContext {
 
   test("peak execution memory should displayed") {
     val conf = new SparkConf(false)
-    val html = renderStagePage(conf).toString().toLowerCase
+    val html = renderStagePage(conf).toString().toLowerCase(Locale.ROOT)
     val targetString = "peak execution memory"
     assert(html.contains(targetString))
   }
 
   test("SPARK-10543: peak execution memory should be per-task rather than 
cumulative") {
     val conf = new SparkConf(false)
-    val html = renderStagePage(conf).toString().toLowerCase
+    val html = renderStagePage(conf).toString().toLowerCase(Locale.ROOT)
     // verify min/25/50/75/max show task value not cumulative values
     assert(html.contains(s"<td>$peakExecutionMemory.0 b</td>" * 5))
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala 
b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
index f4c561c..bdd1488 100644
--- a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.ui
 
 import java.net.{HttpURLConnection, URL}
+import java.util.Locale
 import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
 
 import scala.io.Source
@@ -453,8 +454,8 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B
       eventually(timeout(10 seconds), interval(50 milliseconds)) {
         goToUi(sc, "/jobs")
         findAll(cssSelector("tbody tr a")).foreach { link =>
-          link.text.toLowerCase should include ("count")
-          link.text.toLowerCase should not include "unknown"
+          link.text.toLowerCase(Locale.ROOT) should include ("count")
+          link.text.toLowerCase(Locale.ROOT) should not include "unknown"
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/core/src/test/scala/org/apache/spark/ui/UISuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ui/UISuite.scala 
b/core/src/test/scala/org/apache/spark/ui/UISuite.scala
index f1be0f6..0c3d4ca 100644
--- a/core/src/test/scala/org/apache/spark/ui/UISuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/UISuite.scala
@@ -19,6 +19,7 @@ package org.apache.spark.ui
 
 import java.net.{BindException, ServerSocket}
 import java.net.{URI, URL}
+import java.util.Locale
 import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
 
 import scala.io.Source
@@ -72,10 +73,10 @@ class UISuite extends SparkFunSuite {
       eventually(timeout(10 seconds), interval(50 milliseconds)) {
         val html = Source.fromURL(sc.ui.get.webUrl).mkString
         assert(!html.contains("random data that should not be present"))
-        assert(html.toLowerCase.contains("stages"))
-        assert(html.toLowerCase.contains("storage"))
-        assert(html.toLowerCase.contains("environment"))
-        assert(html.toLowerCase.contains("executors"))
+        assert(html.toLowerCase(Locale.ROOT).contains("stages"))
+        assert(html.toLowerCase(Locale.ROOT).contains("storage"))
+        assert(html.toLowerCase(Locale.ROOT).contains("environment"))
+        assert(html.toLowerCase(Locale.ROOT).contains("executors"))
       }
     }
   }
@@ -85,7 +86,7 @@ class UISuite extends SparkFunSuite {
       // test if visible from http://localhost:4040
       eventually(timeout(10 seconds), interval(50 milliseconds)) {
         val html = Source.fromURL("http://localhost:4040";).mkString
-        assert(html.toLowerCase.contains("stages"))
+        assert(html.toLowerCase(Locale.ROOT).contains("stages"))
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index 1745281..f736cee 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -18,6 +18,8 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
+import java.util.Locale
+
 import scala.collection.mutable
 import scala.language.reflectiveCalls
 
@@ -203,7 +205,7 @@ object DecisionTreeExample {
       .getOrCreate()
 
     params.checkpointDir.foreach(spark.sparkContext.setCheckpointDir)
-    val algo = params.algo.toLowerCase
+    val algo = params.algo.toLowerCase(Locale.ROOT)
 
     println(s"DecisionTreeExample with parameters:\n$params")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala 
b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
index db55298..ed598d0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
@@ -18,6 +18,8 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
+import java.util.Locale
+
 import scala.collection.mutable
 import scala.language.reflectiveCalls
 
@@ -140,7 +142,7 @@ object GBTExample {
       .getOrCreate()
 
     params.checkpointDir.foreach(spark.sparkContext.setCheckpointDir)
-    val algo = params.algo.toLowerCase
+    val algo = params.algo.toLowerCase(Locale.ROOT)
 
     println(s"GBTExample with parameters:\n$params")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
 
b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
index a9e07c0..8fd46c3 100644
--- 
a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
+++ 
b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
@@ -18,6 +18,8 @@
 // scalastyle:off println
 package org.apache.spark.examples.ml
 
+import java.util.Locale
+
 import scala.collection.mutable
 import scala.language.reflectiveCalls
 
@@ -146,7 +148,7 @@ object RandomForestExample {
       .getOrCreate()
 
     params.checkpointDir.foreach(spark.sparkContext.setCheckpointDir)
-    val algo = params.algo.toLowerCase
+    val algo = params.algo.toLowerCase(Locale.ROOT)
 
     println(s"RandomForestExample with parameters:\n$params")
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
----------------------------------------------------------------------
diff --git 
a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala 
b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index b923e62..cd77ecf 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -18,6 +18,8 @@
 // scalastyle:off println
 package org.apache.spark.examples.mllib
 
+import java.util.Locale
+
 import org.apache.log4j.{Level, Logger}
 import scopt.OptionParser
 
@@ -131,7 +133,7 @@ object LDAExample {
     // Run LDA.
     val lda = new LDA()
 
-    val optimizer = params.algorithm.toLowerCase match {
+    val optimizer = params.algorithm.toLowerCase(Locale.ROOT) match {
       case "em" => new EMLDAOptimizer
       // add (1.0 / actualCorpusSize) to MiniBatchFraction be more robust on tiny datasets.
       case "online" => new OnlineLDAOptimizer().setMiniBatchFraction(0.05 + 1.0 / actualCorpusSize)

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala
 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala
index 58b5269..ab1ce34 100644
--- 
a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala
+++ 
b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.sql.kafka010
 
 import java.{util => ju}
-import java.util.UUID
+import java.util.{Locale, UUID}
 
 import scala.collection.JavaConverters._
 
@@ -74,11 +74,11 @@ private[kafka010] class KafkaSourceProvider extends DataSourceRegister
     // id. Hence, we should generate a unique id for each query.
     val uniqueGroupId = s"spark-kafka-source-${UUID.randomUUID}-${metadataPath.hashCode}"
 
-    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase, v) }
+    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
     val specifiedKafkaParams =
       parameters
         .keySet
-        .filter(_.toLowerCase.startsWith("kafka."))
+        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
         .map { k => k.drop(6).toString -> parameters(k) }
         .toMap
 
@@ -115,11 +115,11 @@ private[kafka010] class KafkaSourceProvider extends DataSourceRegister
     // partial data since Kafka will assign partitions to multiple consumers having the same group
     // id. Hence, we should generate a unique id for each query.
     val uniqueGroupId = s"spark-kafka-relation-${UUID.randomUUID}"
-    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase, v) }
+    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
     val specifiedKafkaParams =
       parameters
         .keySet
-        .filter(_.toLowerCase.startsWith("kafka."))
+        .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
         .map { k => k.drop(6).toString -> parameters(k) }
         .toMap
 
@@ -192,7 +192,7 @@ private[kafka010] class KafkaSourceProvider extends DataSourceRegister
   }
 
   private def kafkaParamsForProducer(parameters: Map[String, String]): Map[String, String] = {
-    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase, v) }
+    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
     if (caseInsensitiveParams.contains(s"kafka.${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}")) {
       throw new IllegalArgumentException(
         s"Kafka option '${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}' is not supported as keys "
@@ -207,7 +207,7 @@ private[kafka010] class KafkaSourceProvider extends DataSourceRegister
     }
     parameters
       .keySet
-      .filter(_.toLowerCase.startsWith("kafka."))
+      .filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
       .map { k => k.drop(6).toString -> parameters(k) }
       .toMap + (ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG -> classOf[ByteArraySerializer].getName,
         ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG -> classOf[ByteArraySerializer].getName)
@@ -272,7 +272,7 @@ private[kafka010] class KafkaSourceProvider extends DataSourceRegister
 
   private def validateGeneralOptions(parameters: Map[String, String]): Unit = {
     // Validate source options
-    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase, v) }
+    val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
     val specifiedStrategies =
       caseInsensitiveParams.filter { case (k, _) => STRATEGY_OPTION_KEYS.contains(k) }.toSeq
 
@@ -451,8 +451,10 @@ private[kafka010] object KafkaSourceProvider {
       offsetOptionKey: String,
       defaultOffsets: KafkaOffsetRangeLimit): KafkaOffsetRangeLimit = {
     params.get(offsetOptionKey).map(_.trim) match {
-      case Some(offset) if offset.toLowerCase == "latest" => LatestOffsetRangeLimit
-      case Some(offset) if offset.toLowerCase == "earliest" => EarliestOffsetRangeLimit
+      case Some(offset) if offset.toLowerCase(Locale.ROOT) == "latest" =>
+        LatestOffsetRangeLimit
+      case Some(offset) if offset.toLowerCase(Locale.ROOT) == "earliest" =>
+        EarliestOffsetRangeLimit
       case Some(json) => SpecificOffsetRangeLimit(JsonUtils.partitionOffsets(json))
       case None => defaultOffsets
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
index 68bc3e3..91893df 100644
--- 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
+++ 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaRelationSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.kafka010
 
+import java.util.Locale
 import java.util.concurrent.atomic.AtomicInteger
 
 import org.apache.kafka.common.TopicPartition
@@ -195,7 +196,7 @@ class KafkaRelationSuite extends QueryTest with BeforeAndAfter with SharedSQLCon
         reader.load()
       }
       expectedMsgs.foreach { m =>
-        assert(ex.getMessage.toLowerCase.contains(m.toLowerCase))
+        assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
       }
     }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSinkSuite.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSinkSuite.scala
 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSinkSuite.scala
index 4905356..4bd052d 100644
--- 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSinkSuite.scala
+++ 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSinkSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.kafka010
 
+import java.util.Locale
 import java.util.concurrent.atomic.AtomicInteger
 
 import org.apache.kafka.clients.producer.ProducerConfig
@@ -75,7 +76,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
         .option("kafka.bootstrap.servers", testUtils.brokerAddress)
         .save()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       "null topic present in the data"))
   }
 
@@ -92,7 +93,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
         .mode(SaveMode.Ignore)
         .save()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       s"save mode ignore not allowed for kafka"))
 
     // Test bad save mode Overwrite
@@ -103,7 +104,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
         .mode(SaveMode.Overwrite)
         .save()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       s"save mode overwrite not allowed for kafka"))
   }
 
@@ -233,7 +234,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
       writer.stop()
     }
     assert(ex.getMessage
-      .toLowerCase
+      .toLowerCase(Locale.ROOT)
       .contains("topic option required when no 'topic' attribute is present"))
 
     try {
@@ -248,7 +249,8 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
     } finally {
       writer.stop()
     }
-    assert(ex.getMessage.toLowerCase.contains("required attribute 'value' not 
found"))
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
+      "required attribute 'value' not found"))
   }
 
   test("streaming - write data with valid schema but wrong types") {
@@ -270,7 +272,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
     } finally {
       writer.stop()
     }
-    assert(ex.getMessage.toLowerCase.contains("topic type must be a string"))
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("topic type must be a string"))
 
     try {
       /* value field wrong type */
@@ -284,7 +286,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
     } finally {
       writer.stop()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       "value attribute type must be a string or binarytype"))
 
     try {
@@ -299,7 +301,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
     } finally {
       writer.stop()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       "key attribute type must be a string or binarytype"))
   }
 
@@ -318,7 +320,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
     } finally {
       writer.stop()
     }
-    assert(ex.getMessage.toLowerCase.contains("job aborted"))
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("job aborted"))
   }
 
   test("streaming - exception on config serializer") {
@@ -330,7 +332,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
         input.toDF(),
         withOptions = Map("kafka.key.serializer" -> "foo"))()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       "kafka option 'key.serializer' is not supported"))
 
     ex = intercept[IllegalArgumentException] {
@@ -338,7 +340,7 @@ class KafkaSinkSuite extends StreamTest with SharedSQLContext {
         input.toDF(),
         withOptions = Map("kafka.value.serializer" -> "foo"))()
     }
-    assert(ex.getMessage.toLowerCase.contains(
+    assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(
       "kafka option 'value.serializer' is not supported"))
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
index 0046ba7..2034b9b 100644
--- 
a/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
+++ 
b/external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.kafka010
 import java.io._
 import java.nio.charset.StandardCharsets.UTF_8
 import java.nio.file.{Files, Paths}
-import java.util.Properties
+import java.util.{Locale, Properties}
 import java.util.concurrent.ConcurrentLinkedQueue
 import java.util.concurrent.atomic.AtomicInteger
 
@@ -491,7 +491,7 @@ class KafkaSourceSuite extends KafkaSourceTest {
         reader.load()
       }
       expectedMsgs.foreach { m =>
-        assert(ex.getMessage.toLowerCase.contains(m.toLowerCase))
+        assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
       }
     }
 
@@ -524,7 +524,7 @@ class KafkaSourceSuite extends KafkaSourceTest {
           .option(s"$key", value)
         reader.load()
       }
-      assert(ex.getMessage.toLowerCase.contains("not supported"))
+      assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("not supported"))
     }
 
     testUnsupportedConfig("kafka.group.id")

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/ConsumerStrategy.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/ConsumerStrategy.scala
 
b/external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/ConsumerStrategy.scala
index 778c06e..d2100fc 100644
--- 
a/external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/ConsumerStrategy.scala
+++ 
b/external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/ConsumerStrategy.scala
@@ -17,7 +17,8 @@
 
 package org.apache.spark.streaming.kafka010
 
-import java.{ lang => jl, util => ju }
+import java.{lang => jl, util => ju}
+import java.util.Locale
 
 import scala.collection.JavaConverters._
 
@@ -93,7 +94,8 @@ private case class Subscribe[K, V](
       // but cant seek to a position before poll, because poll is what gets 
subscription partitions
       // So, poll, suppress the first exception, then seek
       val aor = kafkaParams.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)
-      val shouldSuppress = aor != null && aor.asInstanceOf[String].toUpperCase == "NONE"
+      val shouldSuppress =
+        aor != null && aor.asInstanceOf[String].toUpperCase(Locale.ROOT) == "NONE"
       try {
         consumer.poll(0)
       } catch {
@@ -145,7 +147,8 @@ private case class SubscribePattern[K, V](
     if (!toSeek.isEmpty) {
       // work around KAFKA-3370 when reset is none, see explanation in 
Subscribe above
       val aor = kafkaParams.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)
-      val shouldSuppress = aor != null && aor.asInstanceOf[String].toUpperCase == "NONE"
+      val shouldSuppress =
+        aor != null && aor.asInstanceOf[String].toUpperCase(Locale.ROOT) == "NONE"
       try {
         consumer.poll(0)
       } catch {
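
The change above normalizes the user-supplied auto.offset.reset value with an explicit locale before
comparing it to "NONE", so the suppression logic no longer depends on the JVM default. A REPL-style
sketch of the same pattern (the map literal is made up for illustration):

    import java.util.Locale

    // Illustrative stand-in for the consumer's kafkaParams map.
    val kafkaParams = new java.util.HashMap[String, Object]()
    kafkaParams.put("auto.offset.reset", "None")

    val aor = kafkaParams.get("auto.offset.reset")
    val shouldSuppress =
      aor != null && aor.asInstanceOf[String].toUpperCase(Locale.ROOT) == "NONE"
    // String.equalsIgnoreCase("none") would be another locale-neutral way to write this test.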

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
----------------------------------------------------------------------
diff --git 
a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
 
b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
index d5aef81..7823072 100644
--- 
a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
+++ 
b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
@@ -20,7 +20,7 @@ package org.apache.spark.streaming.kafka
 import java.io.OutputStream
 import java.lang.{Integer => JInt, Long => JLong, Number => JNumber}
 import java.nio.charset.StandardCharsets
-import java.util.{List => JList, Map => JMap, Set => JSet}
+import java.util.{List => JList, Locale, Map => JMap, Set => JSet}
 
 import scala.collection.JavaConverters._
 import scala.reflect.ClassTag
@@ -206,7 +206,7 @@ object KafkaUtils {
       kafkaParams: Map[String, String],
       topics: Set[String]
     ): Map[TopicAndPartition, Long] = {
-    val reset = kafkaParams.get("auto.offset.reset").map(_.toLowerCase)
+    val reset = kafkaParams.get("auto.offset.reset").map(_.toLowerCase(Locale.ROOT))
     val result = for {
       topicPartitions <- kc.getPartitions(topics).right
       leaderOffsets <- (if (reset == Some("smallest")) {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index 7b56bce..965ce3d 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.ml.classification
 
+import java.util.Locale
+
 import scala.collection.mutable
 
 import breeze.linalg.{DenseVector => BDV}
@@ -654,7 +656,7 @@ object LogisticRegression extends 
DefaultParamsReadable[LogisticRegression] {
   override def load(path: String): LogisticRegression = super.load(path)
 
   private[classification] val supportedFamilyNames =
-    Array("auto", "binomial", "multinomial").map(_.toLowerCase)
+    Array("auto", "binomial", "multinomial").map(_.toLowerCase(Locale.ROOT))
 }
 
 /**

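Storing supportedFamilyNames lower-cased with Locale.ROOT lets user input be matched
case-insensitively without depending on the default locale. A REPL-style sketch of the round trip
(the helper name is illustrative):

    import java.util.Locale

    val supportedFamilyNames =
      Array("auto", "binomial", "multinomial").map(_.toLowerCase(Locale.ROOT))

    def isSupportedFamily(family: String): Boolean =
      supportedFamilyNames.contains(family.toLowerCase(Locale.ROOT))

    assert(isSupportedFamily("Binomial"))
    assert(!isSupportedFamily("poisson"))
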
http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala 
b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
index 55720e2..2f50dc7 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.ml.clustering
 
+import java.util.Locale
+
 import org.apache.hadoop.fs.Path
 import org.json4s.DefaultFormats
 import org.json4s.JsonAST.JObject
@@ -173,7 +175,8 @@ private[clustering] trait LDAParams extends Params with 
HasFeaturesCol with HasM
   @Since("1.6.0")
   final val optimizer = new Param[String](this, "optimizer", "Optimizer or 
inference" +
     " algorithm used to estimate the LDA model. Supported: " + 
supportedOptimizers.mkString(", "),
-    (o: String) => ParamValidators.inArray(supportedOptimizers).apply(o.toLowerCase))
+    (o: String) =>
+      ParamValidators.inArray(supportedOptimizers).apply(o.toLowerCase(Locale.ROOT)))
 
   /** @group getParam */
   @Since("1.6.0")

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
index c49416b..4bd4aa7 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.ml.r
 
+import java.util.Locale
+
 import org.apache.hadoop.fs.Path
 import org.json4s._
 import org.json4s.JsonDSL._
@@ -91,7 +93,7 @@ private[r] object GeneralizedLinearRegressionWrapper
       .setRegParam(regParam)
       .setFeaturesCol(rFormula.getFeaturesCol)
     // set variancePower and linkPower if family is tweedie; otherwise, set 
link function
-    if (family.toLowerCase == "tweedie") {
+    if (family.toLowerCase(Locale.ROOT) == "tweedie") {
       glr.setVariancePower(variancePower).setLinkPower(linkPower)
     } else {
       glr.setLink(link)
@@ -151,7 +153,7 @@ private[r] object GeneralizedLinearRegressionWrapper
     val rDeviance: Double = summary.deviance
     val rResidualDegreeOfFreedomNull: Long = 
summary.residualDegreeOfFreedomNull
     val rResidualDegreeOfFreedom: Long = summary.residualDegreeOfFreedom
-    val rAic: Double = if (family.toLowerCase == "tweedie" &&
+    val rAic: Double = if (family.toLowerCase(Locale.ROOT) == "tweedie" &&
       !Array(0.0, 1.0, 2.0).exists(x => math.abs(x - variancePower) < 1e-8)) {
       0.0
     } else {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala 
b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
index 60dd736..a20ef72 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala
@@ -19,6 +19,7 @@ package org.apache.spark.ml.recommendation
 
 import java.{util => ju}
 import java.io.IOException
+import java.util.Locale
 
 import scala.collection.mutable
 import scala.reflect.ClassTag
@@ -40,8 +41,7 @@ import org.apache.spark.ml.util._
 import org.apache.spark.mllib.linalg.CholeskyDecomposition
 import org.apache.spark.mllib.optimization.NNLS
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{DataFrame, Dataset, Row}
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
+import org.apache.spark.sql.{DataFrame, Dataset}
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.types._
 import org.apache.spark.storage.StorageLevel
@@ -118,10 +118,11 @@ private[recommendation] trait ALSModelParams extends 
Params with HasPredictionCo
     "useful in cross-validation or production scenarios, for handling 
user/item ids the model " +
     "has not seen in the training data. Supported values: " +
     s"${ALSModel.supportedColdStartStrategies.mkString(",")}.",
-    (s: String) => ALSModel.supportedColdStartStrategies.contains(s.toLowerCase))
+    (s: String) =>
+      ALSModel.supportedColdStartStrategies.contains(s.toLowerCase(Locale.ROOT)))
 
   /** @group expertGetParam */
-  def getColdStartStrategy: String = $(coldStartStrategy).toLowerCase
+  def getColdStartStrategy: String = $(coldStartStrategy).toLowerCase(Locale.ROOT)
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
 
b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
index 3be8b53..33137b0 100644
--- 
a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
+++ 
b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.ml.regression
 
+import java.util.Locale
+
 import breeze.stats.{distributions => dist}
 import org.apache.hadoop.fs.Path
 
@@ -57,7 +59,7 @@ private[regression] trait GeneralizedLinearRegressionBase 
extends PredictorParam
   final val family: Param[String] = new Param(this, "family",
     "The name of family which is a description of the error distribution to be 
used in the " +
       s"model. Supported options: ${supportedFamilyNames.mkString(", ")}.",
-    (value: String) => supportedFamilyNames.contains(value.toLowerCase))
+    (value: String) => supportedFamilyNames.contains(value.toLowerCase(Locale.ROOT)))
 
   /** @group getParam */
   @Since("2.0.0")
@@ -99,7 +101,7 @@ private[regression] trait GeneralizedLinearRegressionBase 
extends PredictorParam
   final val link: Param[String] = new Param(this, "link", "The name of link 
function " +
     "which provides the relationship between the linear predictor and the mean 
of the " +
     s"distribution function. Supported options: 
${supportedLinkNames.mkString(", ")}",
-    (value: String) => supportedLinkNames.contains(value.toLowerCase))
+    (value: String) => supportedLinkNames.contains(value.toLowerCase(Locale.ROOT)))
 
   /** @group getParam */
   @Since("2.0.0")
@@ -148,7 +150,7 @@ private[regression] trait GeneralizedLinearRegressionBase 
extends PredictorParam
       schema: StructType,
       fitting: Boolean,
       featuresDataType: DataType): StructType = {
-    if ($(family).toLowerCase == "tweedie") {
+    if ($(family).toLowerCase(Locale.ROOT) == "tweedie") {
       if (isSet(link)) {
         logWarning("When family is tweedie, use param linkPower to specify 
link function. " +
           "Setting param link will take no effect.")
@@ -460,13 +462,15 @@ object GeneralizedLinearRegression extends 
DefaultParamsReadable[GeneralizedLine
      */
     def apply(params: GeneralizedLinearRegressionBase): FamilyAndLink = {
       val familyObj = Family.fromParams(params)
-      val linkObj = if ((params.getFamily.toLowerCase != "tweedie" &&
-        params.isSet(params.link)) || (params.getFamily.toLowerCase == "tweedie" &&
-        params.isSet(params.linkPower))) {
-        Link.fromParams(params)
-      } else {
-        familyObj.defaultLink
-      }
+      val linkObj =
+        if ((params.getFamily.toLowerCase(Locale.ROOT) != "tweedie" &&
+              params.isSet(params.link)) ||
+            (params.getFamily.toLowerCase(Locale.ROOT) == "tweedie" &&
+              params.isSet(params.linkPower))) {
+          Link.fromParams(params)
+        } else {
+          familyObj.defaultLink
+        }
       new FamilyAndLink(familyObj, linkObj)
     }
   }
@@ -519,7 +523,7 @@ object GeneralizedLinearRegression extends 
DefaultParamsReadable[GeneralizedLine
      * @param params the parameter map containing family name and variance 
power
      */
     def fromParams(params: GeneralizedLinearRegressionBase): Family = {
-      params.getFamily.toLowerCase match {
+      params.getFamily.toLowerCase(Locale.ROOT) match {
         case Gaussian.name => Gaussian
         case Binomial.name => Binomial
         case Poisson.name => Poisson
@@ -795,7 +799,7 @@ object GeneralizedLinearRegression extends 
DefaultParamsReadable[GeneralizedLine
      * @param params the parameter map containing family, link and linkPower
      */
     def fromParams(params: GeneralizedLinearRegressionBase): Link = {
-      if (params.getFamily.toLowerCase == "tweedie") {
+      if (params.getFamily.toLowerCase(Locale.ROOT) == "tweedie") {
         params.getLinkPower match {
           case 0.0 => Log
           case 1.0 => Identity
@@ -804,7 +808,7 @@ object GeneralizedLinearRegression extends 
DefaultParamsReadable[GeneralizedLine
           case others => new Power(others)
         }
       } else {
-        params.getLink.toLowerCase match {
+        params.getLink.toLowerCase(Locale.ROOT) match {
           case Identity.name => Identity
           case Logit.name => Logit
           case Log.name => Log
@@ -1253,8 +1257,8 @@ class GeneralizedLinearRegressionSummary 
private[regression] (
    */
   @Since("2.0.0")
   lazy val dispersion: Double = if (
-    model.getFamily.toLowerCase == Binomial.name ||
-      model.getFamily.toLowerCase == Poisson.name) {
+    model.getFamily.toLowerCase(Locale.ROOT) == Binomial.name ||
+      model.getFamily.toLowerCase(Locale.ROOT) == Poisson.name) {
     1.0
   } else {
     val rss = pearsonResiduals.agg(sum(pow(col("pearsonResiduals"), 
2.0))).first().getDouble(0)
@@ -1357,8 +1361,8 @@ class GeneralizedLinearRegressionTrainingSummary 
private[regression] (
   @Since("2.0.0")
   lazy val pValues: Array[Double] = {
     if (isNormalSolver) {
-      if (model.getFamily.toLowerCase == Binomial.name ||
-        model.getFamily.toLowerCase == Poisson.name) {
+      if (model.getFamily.toLowerCase(Locale.ROOT) == Binomial.name ||
+        model.getFamily.toLowerCase(Locale.ROOT) == Poisson.name) {
         tValues.map { x => 2.0 * (1.0 - dist.Gaussian(0.0, 
1.0).cdf(math.abs(x))) }
       } else {
         tValues.map { x =>
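
Family.fromParams and Link.fromParams above dispatch on the lower-cased name, so "Gaussian",
"GAUSSIAN" and "gaussian" all resolve to the same object on any JVM. A trimmed-down sketch of that
dispatch (the trait and case objects here are illustrative stand-ins, not the real classes):

    import java.util.Locale

    sealed trait Family { def name: String }
    case object Gaussian extends Family { val name = "gaussian" }
    case object Binomial extends Family { val name = "binomial" }
    case object Poisson extends Family { val name = "poisson" }

    def fromName(family: String): Family =
      family.toLowerCase(Locale.ROOT) match {
        case Gaussian.name => Gaussian
        case Binomial.name => Binomial
        case Poisson.name => Poisson
        case other => throw new IllegalArgumentException(s"Unsupported family: $other")
      }

    assert(fromName("Gaussian") == Gaussian)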

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala 
b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
index 5eb707d..cd1950b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.ml.tree
 
+import java.util.Locale
+
 import scala.util.Try
 
 import org.apache.spark.ml.PredictorParams
@@ -218,7 +220,8 @@ private[ml] trait TreeClassifierParams extends Params {
   final val impurity: Param[String] = new Param[String](this, "impurity", 
"Criterion used for" +
     " information gain calculation (case-insensitive). Supported options:" +
     s" ${TreeClassifierParams.supportedImpurities.mkString(", ")}",
-    (value: String) => TreeClassifierParams.supportedImpurities.contains(value.toLowerCase))
+    (value: String) =>
+      TreeClassifierParams.supportedImpurities.contains(value.toLowerCase(Locale.ROOT)))
 
   setDefault(impurity -> "gini")
 
@@ -230,7 +233,7 @@ private[ml] trait TreeClassifierParams extends Params {
   def setImpurity(value: String): this.type = set(impurity, value)
 
   /** @group getParam */
-  final def getImpurity: String = $(impurity).toLowerCase
+  final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT)
 
   /** Convert new impurity to old impurity. */
   private[ml] def getOldImpurity: OldImpurity = {
@@ -247,7 +250,8 @@ private[ml] trait TreeClassifierParams extends Params {
 
 private[ml] object TreeClassifierParams {
   // These options should be lowercase.
-  final val supportedImpurities: Array[String] = Array("entropy", "gini").map(_.toLowerCase)
+  final val supportedImpurities: Array[String] =
+    Array("entropy", "gini").map(_.toLowerCase(Locale.ROOT))
 }
 
 private[ml] trait DecisionTreeClassifierParams
@@ -267,7 +271,8 @@ private[ml] trait TreeRegressorParams extends Params {
   final val impurity: Param[String] = new Param[String](this, "impurity", 
"Criterion used for" +
     " information gain calculation (case-insensitive). Supported options:" +
     s" ${TreeRegressorParams.supportedImpurities.mkString(", ")}",
-    (value: String) => TreeRegressorParams.supportedImpurities.contains(value.toLowerCase))
+    (value: String) =>
+      TreeRegressorParams.supportedImpurities.contains(value.toLowerCase(Locale.ROOT)))
 
   setDefault(impurity -> "variance")
 
@@ -279,7 +284,7 @@ private[ml] trait TreeRegressorParams extends Params {
   def setImpurity(value: String): this.type = set(impurity, value)
 
   /** @group getParam */
-  final def getImpurity: String = $(impurity).toLowerCase
+  final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT)
 
   /** Convert new impurity to old impurity. */
   private[ml] def getOldImpurity: OldImpurity = {
@@ -295,7 +300,8 @@ private[ml] trait TreeRegressorParams extends Params {
 
 private[ml] object TreeRegressorParams {
   // These options should be lowercase.
-  final val supportedImpurities: Array[String] = Array("variance").map(_.toLowerCase)
+  final val supportedImpurities: Array[String] =
+    Array("variance").map(_.toLowerCase(Locale.ROOT))
 }
 
 private[ml] trait DecisionTreeRegressorParams extends DecisionTreeParams
@@ -417,7 +423,8 @@ private[ml] trait RandomForestParams extends 
TreeEnsembleParams {
       s" Supported options: 
${RandomForestParams.supportedFeatureSubsetStrategies.mkString(", ")}" +
       s", (0.0-1.0], [1-n].",
     (value: String) =>
-      RandomForestParams.supportedFeatureSubsetStrategies.contains(value.toLowerCase)
+      RandomForestParams.supportedFeatureSubsetStrategies.contains(
+        value.toLowerCase(Locale.ROOT))
       || Try(value.toInt).filter(_ > 0).isSuccess
       || Try(value.toDouble).filter(_ > 0).filter(_ <= 1.0).isSuccess)
 
@@ -431,13 +438,13 @@ private[ml] trait RandomForestParams extends 
TreeEnsembleParams {
   def setFeatureSubsetStrategy(value: String): this.type = 
set(featureSubsetStrategy, value)
 
   /** @group getParam */
-  final def getFeatureSubsetStrategy: String = $(featureSubsetStrategy).toLowerCase
+  final def getFeatureSubsetStrategy: String = $(featureSubsetStrategy).toLowerCase(Locale.ROOT)
 }
 
 private[spark] object RandomForestParams {
   // These options should be lowercase.
   final val supportedFeatureSubsetStrategies: Array[String] =
-    Array("auto", "all", "onethird", "sqrt", "log2").map(_.toLowerCase)
+    Array("auto", "all", "onethird", "sqrt", "log2").map(_.toLowerCase(Locale.ROOT))
 }
 
 private[ml] trait RandomForestClassifierParams
@@ -509,7 +516,8 @@ private[ml] trait GBTParams extends TreeEnsembleParams with 
HasMaxIter {
 private[ml] object GBTClassifierParams {
   // The losses below should be lowercase.
   /** Accessor for supported loss settings: logistic */
-  final val supportedLossTypes: Array[String] = Array("logistic").map(_.toLowerCase)
+  final val supportedLossTypes: Array[String] =
+    Array("logistic").map(_.toLowerCase(Locale.ROOT))
 }
 
 private[ml] trait GBTClassifierParams extends GBTParams with 
TreeClassifierParams {
@@ -523,12 +531,13 @@ private[ml] trait GBTClassifierParams extends GBTParams 
with TreeClassifierParam
   val lossType: Param[String] = new Param[String](this, "lossType", "Loss 
function which GBT" +
     " tries to minimize (case-insensitive). Supported options:" +
     s" ${GBTClassifierParams.supportedLossTypes.mkString(", ")}",
-    (value: String) => GBTClassifierParams.supportedLossTypes.contains(value.toLowerCase))
+    (value: String) =>
+      GBTClassifierParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT)))
 
   setDefault(lossType -> "logistic")
 
   /** @group getParam */
-  def getLossType: String = $(lossType).toLowerCase
+  def getLossType: String = $(lossType).toLowerCase(Locale.ROOT)
 
   /** (private[ml]) Convert new loss to old loss. */
   override private[ml] def getOldLossType: OldClassificationLoss = {
@@ -544,7 +553,8 @@ private[ml] trait GBTClassifierParams extends GBTParams 
with TreeClassifierParam
 private[ml] object GBTRegressorParams {
   // The losses below should be lowercase.
   /** Accessor for supported loss settings: squared (L2), absolute (L1) */
-  final val supportedLossTypes: Array[String] = Array("squared", "absolute").map(_.toLowerCase)
+  final val supportedLossTypes: Array[String] =
+    Array("squared", "absolute").map(_.toLowerCase(Locale.ROOT))
 }
 
 private[ml] trait GBTRegressorParams extends GBTParams with 
TreeRegressorParams {
@@ -558,12 +568,13 @@ private[ml] trait GBTRegressorParams extends GBTParams 
with TreeRegressorParams
   val lossType: Param[String] = new Param[String](this, "lossType", "Loss 
function which GBT" +
     " tries to minimize (case-insensitive). Supported options:" +
     s" ${GBTRegressorParams.supportedLossTypes.mkString(", ")}",
-    (value: String) => GBTRegressorParams.supportedLossTypes.contains(value.toLowerCase))
+    (value: String) =>
+      GBTRegressorParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT)))
 
   setDefault(lossType -> "squared")
 
   /** @group getParam */
-  def getLossType: String = $(lossType).toLowerCase
+  def getLossType: String = $(lossType).toLowerCase(Locale.ROOT)
 
   /** (private[ml]) Convert new loss to old loss. */
   override private[ml] def getOldLossType: OldLoss = {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala
index 6c5f529..4aa6472 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.mllib.clustering
 
+import java.util.Locale
+
 import breeze.linalg.{DenseVector => BDV}
 
 import org.apache.spark.annotation.{DeveloperApi, Since}
@@ -306,7 +308,7 @@ class LDA private (
   @Since("1.4.0")
   def setOptimizer(optimizerName: String): this.type = {
     this.ldaOptimizer =
-      optimizerName.toLowerCase match {
+      optimizerName.toLowerCase(Locale.ROOT) match {
         case "em" => new EMLDAOptimizer
         case "online" => new OnlineLDAOptimizer
         case other =>

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
----------------------------------------------------------------------
diff --git 
a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala 
b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
index 98a3021..4c77468 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.mllib.tree.impurity
 
+import java.util.Locale
+
 import org.apache.spark.annotation.{DeveloperApi, Since}
 
 /**
@@ -184,7 +186,7 @@ private[spark] object ImpurityCalculator {
    * the given stats.
    */
   def getCalculator(impurity: String, stats: Array[Double]): 
ImpurityCalculator = {
-    impurity.toLowerCase match {
+    impurity.toLowerCase(Locale.ROOT) match {
       case "gini" => new GiniCalculator(stats)
       case "entropy" => new EntropyCalculator(stats)
       case "variance" => new VarianceCalculator(stats)

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/repl/scala-2.11/src/main/scala/org/apache/spark/repl/Main.scala
----------------------------------------------------------------------
diff --git a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/Main.scala 
b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/Main.scala
index 7f2ec01..39fc621 100644
--- a/repl/scala-2.11/src/main/scala/org/apache/spark/repl/Main.scala
+++ b/repl/scala-2.11/src/main/scala/org/apache/spark/repl/Main.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.repl
 
 import java.io.File
+import java.util.Locale
 
 import scala.tools.nsc.GenericRunnerSettings
 
@@ -88,7 +89,7 @@ object Main extends Logging {
     }
 
     val builder = SparkSession.builder.config(conf)
-    if (conf.get(CATALOG_IMPLEMENTATION.key, "hive").toLowerCase == "hive") {
+    if (conf.get(CATALOG_IMPLEMENTATION.key, "hive").toLowerCase(Locale.ROOT) == "hive") {
       if (SparkSession.hiveClassesArePresent) {
         // In the case that the property is not set at all, builder's config
         // does not have this value set to 'hive' yet. The original default
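
The catalog check above is the same trap in a different spot: the configured value is compared to
the literal "hive" after lower-casing, and under a Turkish default locale "HIVE".toLowerCase yields
"hıve", so the Hive-backed session would silently not be used. A REPL-style sketch of the fixed
comparison (the conf map is illustrative):

    import java.util.Locale

    val conf = Map("spark.sql.catalogImplementation" -> "HIVE")
    val catalogImpl = conf.getOrElse("spark.sql.catalogImplementation", "hive")

    // Locale.ROOT keeps the comparison stable regardless of the JVM's default locale.
    val useHive = catalogImpl.toLowerCase(Locale.ROOT) == "hive"
    assert(useHive)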

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
----------------------------------------------------------------------
diff --git 
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
 
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index 3218d22..424bbca 100644
--- 
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ 
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -21,7 +21,7 @@ import java.io.{File, FileOutputStream, IOException, 
OutputStreamWriter}
 import java.net.{InetAddress, UnknownHostException, URI}
 import java.nio.ByteBuffer
 import java.nio.charset.StandardCharsets
-import java.util.{Properties, UUID}
+import java.util.{Locale, Properties, UUID}
 import java.util.zip.{ZipEntry, ZipOutputStream}
 
 import scala.collection.JavaConverters._
@@ -532,7 +532,7 @@ private[spark] class Client(
           try {
             jarsStream.setLevel(0)
             jarsDir.listFiles().foreach { f =>
-              if (f.isFile && f.getName.toLowerCase().endsWith(".jar") && f.canRead) {
+              if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
                 jarsStream.putNextEntry(new ZipEntry(f.getName))
                 Files.copy(f, jarsStream)
                 jarsStream.closeEntry()
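
The archive-building loop filters candidate files by extension after lower-casing the name; pinning
the locale keeps that filter identical on every machine. A small sketch of the same filter over a
plain directory listing (the path is hypothetical):

    import java.io.File
    import java.util.Locale

    // Hypothetical directory; in the patch this is the staged jars directory.
    val jarsDir = new File("/tmp/spark-jars")
    val jars = Option(jarsDir.listFiles()).getOrElse(Array.empty[File])
      .filter(f => f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead)

    jars.foreach(f => println(f.getName))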

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
index f8004ca..c4827b8 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveHints.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.catalyst.analysis
 
+import java.util.Locale
+
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.trees.CurrentOrigin
@@ -83,7 +85,7 @@ object ResolveHints {
     }
 
     def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
-      case h: Hint if BROADCAST_HINT_NAMES.contains(h.name.toUpperCase) =>
+      case h: Hint if BROADCAST_HINT_NAMES.contains(h.name.toUpperCase(Locale.ROOT)) =>
         applyBroadcastHint(h.child, h.parameters.toSet)
     }
   }
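
Hint resolution upper-cases the hint name before looking it up, which is exactly where the Turkish
locale bites: a lower-case 'i' upper-cases to the dotted 'İ' there, so the lookup misses. A
REPL-style sketch (the set literal mirrors the built-in broadcast hint names):

    import java.util.Locale

    val BROADCAST_HINT_NAMES = Set("BROADCAST", "BROADCASTJOIN", "MAPJOIN")
    val turkish = new Locale("tr", "TR")

    // Under a Turkish default locale the name no longer matches:
    assert("broadcastjoin".toUpperCase(turkish) == "BROADCASTJO\u0130N") // dotted capital İ
    assert(!BROADCAST_HINT_NAMES.contains("broadcastjoin".toUpperCase(turkish)))
    // With Locale.ROOT the hint resolves as intended:
    assert(BROADCAST_HINT_NAMES.contains("broadcastjoin".toUpperCase(Locale.ROOT)))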

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogUtils.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogUtils.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogUtils.scala
index 254eedf..3ca9e6a 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogUtils.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogUtils.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.catalyst.catalog
 
 import java.net.URI
+import java.util.Locale
 
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.util.Shell
@@ -167,8 +168,10 @@ object CatalogUtils {
    */
   def maskCredentials(options: Map[String, String]): Map[String, String] = {
     options.map {
-      case (key, _) if key.toLowerCase == "password" => (key, "###")
-      case (key, value) if key.toLowerCase == "url" && value.toLowerCase.contains("password") =>
+      case (key, _) if key.toLowerCase(Locale.ROOT) == "password" => (key, "###")
+      case (key, value)
+        if key.toLowerCase(Locale.ROOT) == "url" &&
+          value.toLowerCase(Locale.ROOT).contains("password") =>
         (key, "###")
       case o => o
     }
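
maskCredentials compares option keys (and password-bearing URLs) case-insensitively before redacting
them. A REPL-style sketch of the same transformation (the options map is made up):

    import java.util.Locale

    val options = Map(
      "user" -> "alice",
      "Password" -> "secret",
      "url" -> "jdbc:postgresql://host/db?password=secret")

    val masked = options.map {
      case (key, _) if key.toLowerCase(Locale.ROOT) == "password" => (key, "###")
      case (key, value)
        if key.toLowerCase(Locale.ROOT) == "url" &&
          value.toLowerCase(Locale.ROOT).contains("password") =>
        (key, "###")
      case o => o
    }

    assert(masked("Password") == "###")
    assert(masked("url") == "###")
    assert(masked("user") == "alice")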

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index 6f8c6ee..faedf5f 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.catalyst.catalog
 
 import java.net.URI
+import java.util.Locale
 import javax.annotation.concurrent.GuardedBy
 
 import scala.collection.mutable
@@ -1098,7 +1099,7 @@ class SessionCatalog(
     name.database.isEmpty &&
       functionRegistry.functionExists(name.funcName) &&
       !FunctionRegistry.builtin.functionExists(name.funcName) &&
-      !hiveFunctions.contains(name.funcName.toLowerCase)
+      !hiveFunctions.contains(name.funcName.toLowerCase(Locale.ROOT))
   }
 
   protected def failFunctionLookup(name: String): Nothing = {

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/functionResources.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/functionResources.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/functionResources.scala
index 8e46b96..67bf2d0 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/functionResources.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/functionResources.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.catalyst.catalog
 
+import java.util.Locale
+
 import org.apache.spark.sql.AnalysisException
 
 /** A trait that represents the type of a resourced needed by a function. */
@@ -33,7 +35,7 @@ object ArchiveResource extends FunctionResourceType("archive")
 
 object FunctionResourceType {
   def fromString(resourceType: String): FunctionResourceType = {
-    resourceType.toLowerCase match {
+    resourceType.toLowerCase(Locale.ROOT) match {
       case "jar" => JarResource
       case "file" => FileResource
       case "archive" => ArchiveResource

http://git-wip-us.apache.org/repos/asf/spark/blob/a26e3ed5/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index 1db26d9..b847ef7 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.catalyst.expressions
 
+import java.util.Locale
+
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
 import org.apache.spark.sql.catalyst.expressions.codegen._
@@ -184,7 +186,7 @@ abstract class Expression extends TreeNode[Expression] {
    * Returns a user-facing string representation of this expression's name.
    * This should usually match the name of the function in SQL.
    */
-  def prettyName: String = nodeName.toLowerCase
+  def prettyName: String = nodeName.toLowerCase(Locale.ROOT)
 
   protected def flatArguments: Iterator[Any] = productIterator.flatMap {
     case t: Traversable[_] => t

