[ https://issues.apache.org/jira/browse/SPARK-19186?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Hyukjin Kwon resolved SPARK-19186.
----------------------------------
    Resolution: Not A Problem

^ I agree with this. Also, to my knowledge, we can handle this on the JDBC dialect side as a follow-up to SPARK-17614, assuming the exception comes from
https://github.com/apache/spark/blob/branch-2.1/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala#L60-L62
within Spark.
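
For illustration, here is a minimal sketch of that dialect-side approach, using the JdbcDialect hooks that SPARK-17614 touches (canHandle, quoteIdentifier, getSchemaQuery). The SybaseDialect object is hypothetical, not an existing Spark dialect, and the double-quote escaping assumes quoted identifiers are enabled on the Sybase session:

    import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}

    // Hypothetical dialect for jTDS/Sybase URLs like the one in this report.
    object SybaseDialect extends JdbcDialect {

      // Claim URLs of the form jdbc:jtds:sybase://...
      override def canHandle(url: String): Boolean =
        url.startsWith("jdbc:jtds:sybase")

      // Escape identifiers with double quotes so a name containing '#'
      // is passed through intact (assumes set quoted_identifier on).
      override def quoteIdentifier(colName: String): String =
        "\"" + colName + "\""

      // Schema-resolution query issued from JDBCRDD; quote the table
      // name instead of splicing it in bare.
      override def getSchemaQuery(table: String): String =
        s"SELECT * FROM ${quoteIdentifier(table)} WHERE 1=0"
    }

    // Register the dialect before the first spark.read.format("jdbc") call.
    JdbcDialects.registerDialect(SybaseDialect)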

I am resolving this based on the issue as described in this JIRA. Please reopen it if I misunderstood.
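
As a side note, the parser error below lists BACKQUOTED_IDENTIFIER among the accepted tokens, so on the Spark SQL side the immediate workaround is to backquote any name containing '#', or simply to query the temp view the reproduction already registers. A minimal sketch against the reporter's code (the backticked variant assumes a view was registered under that exact name):

    // The reproduction registers the DataFrame as the temp view "trades",
    // so the SQL should reference that view name:
    val resultsDF = sess.sql("SELECT * FROM trades")
    resultsDF.show()

    // If a Spark SQL name really must contain '#', backquote it; this
    // assumes df.createOrReplaceTempView("CTP#ADR_TYPE_DBF") was called:
    val viaBackticks = sess.sql("SELECT * FROM `CTP#ADR_TYPE_DBF`")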

> Hash symbol in middle of Sybase database table name causes Spark Exception
> --------------------------------------------------------------------------
>
>                 Key: SPARK-19186
>                 URL: https://issues.apache.org/jira/browse/SPARK-19186
>             Project: Spark
>          Issue Type: Bug
>    Affects Versions: 2.1.0
>            Reporter: Adrian Schulewitz
>            Priority: Minor
>
> If I use a table name without a '#' symbol in the middle, no exception occurs, but with one an exception is thrown. According to the Sybase 15 documentation, '#' is a legal character in table names.
>
>     val testSql = "SELECT * FROM CTP#ADR_TYPE_DBF"
>     val conf = new SparkConf().setAppName("MUREX DMart Simple Reader via SQL").setMaster("local[2]")
>     val sess = SparkSession
>                   .builder()
>                   .appName("MUREX DMart Simple SQL Reader")
>                   .config(conf)
>                   .getOrCreate()
>     import sess.implicits._
>     val df = sess.read
>                     .format("jdbc")
>                     .option("url", "jdbc:jtds:sybase://auq7064s.unix.anz:4020/mxdmart56")
>                     .option("driver", "net.sourceforge.jtds.jdbc.Driver")
>                     .option("dbtable", "CTP#ADR_TYPE_DBF")
>                     .option("UDT_DEALCRD_REP", "mxdmart56")
>                     .option("user", "INSTAL")
>                     .option("password", "INSTALL")
>                     .load()
>     df.createOrReplaceTempView("trades")
>     val resultsDF = sess.sql(testSql)
>     resultsDF.show()
> 17/01/12 14:30:01 INFO SharedState: Warehouse path is 'file:/C:/DEVELOPMENT/Projects/MUREX/trunk/murex-eom-reporting/spark-warehouse/'.
> 17/01/12 14:30:04 INFO SparkSqlParser: Parsing command: trades
> 17/01/12 14:30:04 INFO SparkSqlParser: Parsing command: SELECT * FROM CTP#ADR_TYPE_DBF
> Exception in thread "main" org.apache.spark.sql.catalyst.parser.ParseException:
> extraneous input '#' expecting {<EOF>, ',', 'SELECT', 'FROM', 'ADD', 'AS', 
> 'ALL', 'DISTINCT', 'WHERE', 'GROUP', 'BY', 'GROUPING', 'SETS', 'CUBE', 
> 'ROLLUP', 'ORDER', 'HAVING', 'LIMIT', 'AT', 'OR', 'AND', 'IN', NOT, 'NO', 
> 'EXISTS', 'BETWEEN', 'LIKE', RLIKE, 'IS', 'NULL', 'TRUE', 'FALSE', 'NULLS', 
> 'ASC', 'DESC', 'FOR', 'INTERVAL', 'CASE', 'WHEN', 'THEN', 'ELSE', 'END', 
> 'JOIN', 'CROSS', 'OUTER', 'INNER', 'LEFT', 'RIGHT', 'FULL', 'NATURAL', 
> 'LATERAL', 'WINDOW', 'OVER', 'PARTITION', 'RANGE', 'ROWS', 'UNBOUNDED', 
> 'PRECEDING', 'FOLLOWING', 'CURRENT', 'FIRST', 'LAST', 'ROW', 'WITH', 
> 'VALUES', 'CREATE', 'TABLE', 'VIEW', 'REPLACE', 'INSERT', 'DELETE', 'INTO', 
> 'DESCRIBE', 'EXPLAIN', 'FORMAT', 'LOGICAL', 'CODEGEN', 'CAST', 'SHOW', 
> 'TABLES', 'COLUMNS', 'COLUMN', 'USE', 'PARTITIONS', 'FUNCTIONS', 'DROP', 
> 'UNION', 'EXCEPT', 'MINUS', 'INTERSECT', 'TO', 'TABLESAMPLE', 'STRATIFY', 
> 'ALTER', 'RENAME', 'ARRAY', 'MAP', 'STRUCT', 'COMMENT', 'SET', 'RESET', 
> 'DATA', 'START', 'TRANSACTION', 'COMMIT', 'ROLLBACK', 'MACRO', 'IF', 'DIV', 
> 'PERCENT', 'BUCKET', 'OUT', 'OF', 'SORT', 'CLUSTER', 'DISTRIBUTE', 
> 'OVERWRITE', 'TRANSFORM', 'REDUCE', 'USING', 'SERDE', 'SERDEPROPERTIES', 
> 'RECORDREADER', 'RECORDWRITER', 'DELIMITED', 'FIELDS', 'TERMINATED', 
> 'COLLECTION', 'ITEMS', 'KEYS', 'ESCAPED', 'LINES', 'SEPARATED', 'FUNCTION', 
> 'EXTENDED', 'REFRESH', 'CLEAR', 'CACHE', 'UNCACHE', 'LAZY', 'FORMATTED', 
> 'GLOBAL', TEMPORARY, 'OPTIONS', 'UNSET', 'TBLPROPERTIES', 'DBPROPERTIES', 
> 'BUCKETS', 'SKEWED', 'STORED', 'DIRECTORIES', 'LOCATION', 'EXCHANGE', 
> 'ARCHIVE', 'UNARCHIVE', 'FILEFORMAT', 'TOUCH', 'COMPACT', 'CONCATENATE', 
> 'CHANGE', 'CASCADE', 'RESTRICT', 'CLUSTERED', 'SORTED', 'PURGE', 
> 'INPUTFORMAT', 'OUTPUTFORMAT', DATABASE, DATABASES, 'DFS', 'TRUNCATE', 
> 'ANALYZE', 'COMPUTE', 'LIST', 'STATISTICS', 'PARTITIONED', 'EXTERNAL', 
> 'DEFINED', 'REVOKE', 'GRANT', 'LOCK', 'UNLOCK', 'MSCK', 'REPAIR', 'RECOVER', 
> 'EXPORT', 'IMPORT', 'LOAD', 'ROLE', 'ROLES', 'COMPACTIONS', 'PRINCIPALS', 
> 'TRANSACTIONS', 'INDEX', 'INDEXES', 'LOCKS', 'OPTION', 'ANTI', 'LOCAL', 
> 'INPATH', 'CURRENT_DATE', 'CURRENT_TIMESTAMP', IDENTIFIER, 
> BACKQUOTED_IDENTIFIER}(line 1, pos 17)
> == SQL ==
> SELECT * FROM CTP#ADR_TYPE_DBF
> -----------------^^^
>       at org.apache.spark.sql.catalyst.parser.ParseException.withCommand(ParseDriver.scala:197)
>       at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:99)
>       at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:45)
>       at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:53)
>       at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
>       at com.anz.murex.hcp.poc.hcp.api.MurexDatamartSqlReader$.main(MurexDatamartSqlReader.scala:94)
>       at com.anz.murex.hcp.poc.hcp.api.MurexDatamartSqlReader.main(MurexDatamartSqlReader.scala)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:498)
>       at com.intellij.rt.execution.application.AppMain.main(AppMain.java:147)
> 17/01/12 14:30:04 INFO SparkContext: Invoking stop() from shutdown hook


