dongjoon-hyun commented on a change in pull request #26290: 
[SPARK-29120][SQL][TESTS] Port create_view.sql
URL: https://github.com/apache/spark/pull/26290#discussion_r340487877
 
 

 ##########
 File path: 
sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql
 ##########
 @@ -0,0 +1,778 @@
+-- Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+--
+-- CREATE VIEW
+-- 
https://github.com/postgres/postgres/blob/REL_12_STABLE/src/test/regress/sql/create_view.sql
+
+-- Spark doesn't support geometric types
+-- CREATE VIEW street AS
+--    SELECT r.name, r.thepath, c.cname AS cname
+--    FROM ONLY road r, real_city c
+--    WHERE c.outline ## r.thepath;
+
+-- Spark doesn't support geometric types
+-- CREATE VIEW iexit AS
+--    SELECT ih.name, ih.thepath,
+--     interpt_pp(ih.thepath, r.thepath) AS exit
+--    FROM ihighway ih, ramp r
+--    WHERE ih.thepath ## r.thepath;
+
+CREATE TABLE emp (
+  name string,
+  age int,
+  -- Spark doesn't support a geometric type `point`
+  -- location point
+  salary int,
+  manager string
+) USING parquet;
+
+CREATE VIEW toyemp AS
+   SELECT name, age, /* location ,*/ 12*salary AS annualsal
+   FROM emp;
+
+-- Spark doesn't support the COMMENT clause that is not defined in the SQL 
standard
+-- Test comments
+-- COMMENT ON VIEW noview IS 'no view';
+-- COMMENT ON VIEW toyemp IS 'is a view';
+-- COMMENT ON VIEW toyemp IS NULL;
+
+DROP VIEW toyemp;
+DROP TABLE emp;
+
+-- These views are left around mainly to exercise special cases in pg_dump.
+
+-- [SPARK-19842] Informational Referential Integrity Constraints Support in 
Spark
+-- CREATE TABLE view_base_table (key int PRIMARY KEY, data varchar(20));
+--
+-- CREATE VIEW key_dependent_view AS
+--    SELECT * FROM view_base_table GROUP BY key;
+--
+-- ALTER TABLE view_base_table DROP CONSTRAINT view_base_table_pkey;  -- fails
+
+-- CREATE VIEW key_dependent_view_no_cols AS
+--    SELECT FROM view_base_table GROUP BY key HAVING length(data) > 0;
+
+--
+-- CREATE OR REPLACE VIEW
+--
+
+CREATE TABLE viewtest_tbl (a int, b int) using parquet;
+-- [SPARK-29386] Copy data between a file and a table
+-- COPY viewtest_tbl FROM stdin;
+-- 5   10
+-- 10  15
+-- 15  20
+-- 20  25
+-- \.
+INSERT INTO viewtest_tbl VALUES (5, 10), (10, 15), (15, 20), (20, 25);
+
+CREATE OR REPLACE VIEW viewtest AS
+       SELECT * FROM viewtest_tbl;
+
+CREATE OR REPLACE VIEW viewtest AS
+       SELECT * FROM viewtest_tbl WHERE a > 10;
+
+SELECT * FROM viewtest;
+
+CREATE OR REPLACE VIEW viewtest AS
+       SELECT a, b FROM viewtest_tbl WHERE a > 5 ORDER BY b DESC;
+
+SELECT * FROM viewtest;
+
+-- should fail
+-- Spark can accept the DDL query below
 
 Review comment:
   Ur, do you know why `PostgreSQL` disallows this DDL query?

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to