Repository: spark
Updated Branches:
  refs/heads/branch-1.6 efa1e4a25 -> 52e921c7c


[SPARK-11561][SQL] Rename text data source's column name to value.

Author: Reynold Xin <r...@databricks.com>

Closes #9527 from rxin/SPARK-11561.

(cherry picked from commit 3a652f691b220fada0286f8d0a562c5657973d4d)
Signed-off-by: Reynold Xin <r...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/52e921c7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/52e921c7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/52e921c7

Branch: refs/heads/branch-1.6
Commit: 52e921c7cc6ba6dddb923221c42f52eb4cd98f0f
Parents: efa1e4a
Author: Reynold Xin <r...@databricks.com>
Authored: Fri Nov 6 14:47:41 2015 -0800
Committer: Reynold Xin <r...@databricks.com>
Committed: Fri Nov 6 14:47:49 2015 -0800

----------------------------------------------------------------------
 .../spark/sql/execution/datasources/text/DefaultSource.scala   | 6 ++----
 .../spark/sql/execution/datasources/text/TextSuite.scala       | 2 +-
 2 files changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/52e921c7/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
index 52c4421..4b8b8e4 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/DefaultSource.scala
@@ -30,14 +30,12 @@ import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.mapred.SparkHadoopMapRedUtil
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{UnsafeRow, GenericMutableRow}
+import org.apache.spark.sql.catalyst.expressions.UnsafeRow
 import org.apache.spark.sql.catalyst.expressions.codegen.{UnsafeRowWriter, 
BufferHolder}
-import org.apache.spark.sql.columnar.MutableUnsafeRow
 import org.apache.spark.sql.{AnalysisException, Row, SQLContext}
 import org.apache.spark.sql.execution.datasources.PartitionSpec
 import org.apache.spark.sql.sources._
 import org.apache.spark.sql.types.{StringType, StructType}
-import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.util.SerializableConfiguration
 
 /**
@@ -78,7 +76,7 @@ private[sql] class TextRelation(
   extends HadoopFsRelation(maybePartitionSpec) {
 
   /** Data schema is always a single column, named "value". */
-  override def dataSchema: StructType = new StructType().add("text", 
StringType)
+  override def dataSchema: StructType = new StructType().add("value", 
StringType)
 
   /** This is an internal data source that outputs internal row format. */
   override val needConversion: Boolean = false

http://git-wip-us.apache.org/repos/asf/spark/blob/52e921c7/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
index 0a2306c..914e516 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala
@@ -65,7 +65,7 @@ class TextSuite extends QueryTest with SharedSQLContext {
   /** Verifies data and schema. */
   private def verifyFrame(df: DataFrame): Unit = {
     // schema
-    assert(df.schema == new StructType().add("text", StringType))
+    assert(df.schema == new StructType().add("value", StringType))
 
     // verify content
     val data = df.collect()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to