// Spark-shell script: load two JSON datasets, register them as temp tables,
// cache both, and run a join. Assumes `sc: SparkContext` is provided by the shell.
val sqlContext = new org.apache.spark.sql.SQLContext(sc)

// Load person records and expose them as the temp table "person".
val personPath = "/hdd/spark/person.json"
val person = sqlContext.jsonFile(personPath)
person.printSchema()
person.registerTempTable("person")

// Load address records and expose them as the temp table "address".
val addressPath = "/hdd/spark/address.json"
val address = sqlContext.jsonFile(addressPath)
address.printSchema()
address.registerTempTable("address")

// Cache both tables so the join below reads from the in-memory columnar store.
sqlContext.cacheTable("person")
sqlContext.cacheTable("address")

// Join person and address on id and print the first 10 rows.
// NOTE(fix): the SQL literal must be a single string — the original was split
// across two lines by a mail-client wrap, which does not compile.
// `collect.foreach(println)` returns Unit, so there is no value worth binding.
sqlContext.sql(
  "SELECT p.id, p.name, a.city FROM person p, address a where p.id = a.id limit 10"
).collect().foreach(println)

person.json
{"id":"1","name":"Mr. X"}

address.json
{"city":"Earth","id":"1"}



--
View this message in context: 
http://apache-spark-user-list.1001560.n3.nabble.com/spark-sql-join-sql-fails-after-sqlCtx-cacheTable-tp16893p16914.html
Sent from the Apache Spark User List mailing list archive at Nabble.com.

---------------------------------------------------------------------
To unsubscribe, e-mail: user-unsubscr...@spark.apache.org
For additional commands, e-mail: user-h...@spark.apache.org

Reply via email to