[ https://issues.apache.org/jira/browse/SPARK-10741?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Ian updated SPARK-10741:
------------------------

    Description: 
Failed Query with Having Clause
{code}
    def testParquetHaving() {
      val ddl =
        """CREATE TABLE IF NOT EXISTS test ( c1 string, c2 int ) STORED AS PARQUET"""
      val failedHaving =
        """ SELECT c1, avg ( c2 ) as c_avg
          | FROM test
          | GROUP BY c1
          | HAVING ( avg ( c2 ) > 5) ORDER BY c1""".stripMargin
      TestHive.sql(ddl)
      TestHive.sql(failedHaving).collect
    }
{code}
org.apache.spark.sql.AnalysisException: resolved attribute(s) c2#16 missing from c1#17,c2#18 in operator !Aggregate [c1#17], [cast((avg(cast(c2#16 as bigint)) > cast(5 as double)) as boolean) AS havingCondition#12,c1#17,avg(cast(c2#18 as bigint)) AS c_avg#9];
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.failAnalysis(CheckAnalysis.scala:37)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.failAnalysis(Analyzer.scala:44)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:154)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:49)

  was:
Failed Query with Having Clause
    def testParquetHaving() {
      val ddl =
        """CREATE TABLE IF NOT EXISTS test ( c1 string, c2 int ) STORED AS PARQUET"""
      val failedHaving =
        """ SELECT c1, avg ( c2 ) as c_avg
          | FROM test
          | GROUP BY c1
          | HAVING ( avg ( c2 ) > 5) ORDER BY c1""".stripMargin
      TestHive.sql(ddl)
      TestHive.sql(failedHaving).collect
    }
org.apache.spark.sql.AnalysisException: resolved attribute(s) c2#16 missing from c1#17,c2#18 in operator !Aggregate [c1#17], [cast((avg(cast(c2#16 as bigint)) > cast(5 as double)) as boolean) AS havingCondition#12,c1#17,avg(cast(c2#18 as bigint)) AS c_avg#9];
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.failAnalysis(CheckAnalysis.scala:37)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.failAnalysis(Analyzer.scala:44)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:154)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:49)


> Hive Query Having/OrderBy against Parquet table is not working
> ---------------------------------------------------------------
>
>                 Key: SPARK-10741
>                 URL: https://issues.apache.org/jira/browse/SPARK-10741
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 1.5.0
>            Reporter: Ian
>
> Failed Query with Having Clause
> {code}
>     def testParquetHaving() {
>       val ddl =
>         """CREATE TABLE IF NOT EXISTS test ( c1 string, c2 int ) STORED AS PARQUET"""
>       val failedHaving =
>         """ SELECT c1, avg ( c2 ) as c_avg
>           | FROM test
>           | GROUP BY c1
>           | HAVING ( avg ( c2 ) > 5) ORDER BY c1""".stripMargin
>       TestHive.sql(ddl)
>       TestHive.sql(failedHaving).collect
>     }
> {code}
> org.apache.spark.sql.AnalysisException: resolved attribute(s) c2#16 missing from c1#17,c2#18 in operator !Aggregate [c1#17], [cast((avg(cast(c2#16 as bigint)) > cast(5 as double)) as boolean) AS havingCondition#12,c1#17,avg(cast(c2#18 as bigint)) AS c_avg#9];
> 	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.failAnalysis(CheckAnalysis.scala:37)
> 	at org.apache.spark.sql.catalyst.analysis.Analyzer.failAnalysis(Analyzer.scala:44)
> 	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:154)
> 	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:49)

--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
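A possible workaround until the analyzer issue is resolved (untested sketch, not part of the original report): the error suggests the HAVING condition still references the old attribute id (c2#16) after the table is converted to a Parquet relation with fresh ids (c2#18). Two things that may sidestep this are disabling the metastore Parquet conversion, or rewriting the HAVING as a WHERE over a subquery so the outer plan only references the aggregate through its alias. The config key below is the standard spark.sql.hive.convertMetastoreParquet setting; whether it actually avoids this bug is an assumption.
{code}
// Untested workaround sketch against Spark 1.5.x, reusing the TestHive context from the repro.
import org.apache.spark.sql.hive.test.TestHive

// Option 1 (assumption): skip the metastore-to-Parquet conversion that may be
// introducing the fresh attribute ids the HAVING condition no longer matches.
TestHive.setConf("spark.sql.hive.convertMetastoreParquet", "false")

// Option 2: express HAVING as a WHERE over a subquery, so the aggregate is
// only referenced through its alias c_avg in the outer query.
val rewritten =
  """SELECT c1, c_avg
    |  FROM (SELECT c1, avg(c2) AS c_avg FROM test GROUP BY c1) t
    | WHERE c_avg > 5
    | ORDER BY c1""".stripMargin
TestHive.sql(rewritten).collect()
{code}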