This is an automated email from the ASF dual-hosted git repository.

sandy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new c6cea73e3cbe [SPARK-53591][SDP] Simplify Pipeline Spec Pattern Glob 
Matching
c6cea73e3cbe is described below

commit c6cea73e3cbeb5d615410d14b4dd097b5c1d87f9
Author: Jacky Wang <[email protected]>
AuthorDate: Tue Sep 23 09:19:37 2025 -0700

    [SPARK-53591][SDP] Simplify Pipeline Spec Pattern Glob Matching
    
    ### What changes were proposed in this pull request?
    
    In SDP, the recommended scaffolding is to put pipeline definition files in the `transformations` directory and any of its subfolders.
    
    Currently if users have both sql and py pipeline definition files, they 
would need to do something like below to specify all of them in the pipeline 
spec:
    ```yml
    libraries:
      glob:
        include: transformations/**/*.py
      glob:
        include: transformations/**/*.sql
    ```
    This is cumbersome and requires more work from the user. `transformations` 
should only contain pipeline source files ending in `.py` or `.sql` so ideally, 
users shouldn't even need to specify the file extensions.
    
    This PR introduces changes to support the pattern below for source file matching, and throws an exception to discourage users from using the above pattern, because they shouldn't put other file types in this directory.
    
    ```yml
    libraries:
      glob:
        include: transformations/** # matches recursively
    ```
     
    
    ### Why are the changes needed?
    
    Simplify the user experience of needing to manually supply the glob with 
file extensions.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, but SDP not released.
    
    ### How was this patch tested?
    
    New and existing tests and running CLI manually
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    NO
    
    Closes #52348 from JiaqiWang18/SPARK-53591-restrict-sdp-glob-matching.
    
    Authored-by: Jacky Wang <[email protected]>
    Signed-off-by: Sandy Ryza <[email protected]>
---
 python/pyspark/errors/error-conditions.json        |  5 ++
 python/pyspark/pipelines/cli.py                    | 43 +++++++++++-
 python/pyspark/pipelines/init_cli.py               |  4 +-
 python/pyspark/pipelines/tests/test_cli.py         | 70 +++++++++++++++++++-
 .../sql/connect/pipelines/EndToEndAPISuite.scala   |  9 ++-
 .../apache/spark/sql/pipelines/utils/APITest.scala | 77 +++++++++++-----------
 6 files changed, 162 insertions(+), 46 deletions(-)

diff --git a/python/pyspark/errors/error-conditions.json 
b/python/pyspark/errors/error-conditions.json
index 8d1a5e55dad0..30c6efd4e32a 100644
--- a/python/pyspark/errors/error-conditions.json
+++ b/python/pyspark/errors/error-conditions.json
@@ -901,6 +901,11 @@
       "No pipeline.yaml or pipeline.yml file provided in arguments or found in 
directory `<dir_path>` or readable ancestor directories."
     ]
   },
+  "PIPELINE_SPEC_INVALID_GLOB_PATTERN": {
+    "message": [
+      "Invalid glob pattern `<glob_pattern>` in libraries. Only file paths, or 
folder paths ending with /** are allowed."
+    ]
+  },
   "PIPELINE_SPEC_MISSING_REQUIRED_FIELD": {
     "message": [
       "Pipeline spec missing required field `<field_name>`."
diff --git a/python/pyspark/pipelines/cli.py b/python/pyspark/pipelines/cli.py
index dcfda1959a2e..88fc228f045c 100644
--- a/python/pyspark/pipelines/cli.py
+++ b/python/pyspark/pipelines/cli.py
@@ -23,6 +23,7 @@ Example usage:
 """
 from contextlib import contextmanager
 import argparse
+import glob
 import importlib.util
 import os
 import yaml
@@ -58,6 +59,32 @@ class LibrariesGlob:
     include: str
 
 
+def validate_patch_glob_pattern(glob_pattern: str) -> str:
+    """Validates that a glob pattern is allowed.
+
+    Only allows:
+    - File paths (paths without wildcards except for the filename)
+    - Folder paths ending with /** (recursive directory patterns)
+
+    Disallows complex glob patterns like transformations/**/*.py
+    """
+    # Check if it's a simple file path (no wildcards at all)
+    if not glob.has_magic(glob_pattern):
+        return glob_pattern
+
+    # Check if it's a folder path ending with /**
+    if glob_pattern.endswith("/**"):
+        prefix = glob_pattern[:-3]
+        if not glob.has_magic(prefix):
+            # append "/*" to match everything under the directory recursively
+            return glob_pattern + "/*"
+
+    raise PySparkException(
+        errorClass="PIPELINE_SPEC_INVALID_GLOB_PATTERN",
+        messageParameters={"glob_pattern": glob_pattern},
+    )
+
+
 @dataclass(frozen=True)
 class PipelineSpec:
     """Spec for a pipeline.
@@ -75,6 +102,16 @@ class PipelineSpec:
     configuration: Mapping[str, str]
     libraries: Sequence[LibrariesGlob]
 
+    def __post_init__(self) -> None:
+        """Validate libraries automatically after instantiation."""
+        validated = [
+            LibrariesGlob(validate_patch_glob_pattern(lib.include)) for lib in 
self.libraries
+        ]
+
+        # If normalization changed anything, patch into frozen dataclass
+        if tuple(validated) != tuple(self.libraries):
+            object.__setattr__(self, "libraries", tuple(validated))
+
 
 def find_pipeline_spec(current_dir: Path) -> Path:
     """Looks in the current directory and its ancestors for a pipeline spec 
file."""
@@ -180,7 +217,11 @@ def register_definitions(
             log_with_curr_timestamp(f"Loading definitions. Root directory: 
'{path}'.")
             for libraries_glob in spec.libraries:
                 glob_expression = libraries_glob.include
-                matching_files = [p for p in path.glob(glob_expression) if 
p.is_file()]
+                matching_files = [
+                    p
+                    for p in path.glob(glob_expression)
+                    if p.is_file() and "__pycache__" not in p.parts  # ignore 
generated python cache
+                ]
                 log_with_curr_timestamp(
                     f"Found {len(matching_files)} files matching glob 
'{glob_expression}'"
                 )
diff --git a/python/pyspark/pipelines/init_cli.py 
b/python/pyspark/pipelines/init_cli.py
index 89b998bd4f32..47be703f7795 100644
--- a/python/pyspark/pipelines/init_cli.py
+++ b/python/pyspark/pipelines/init_cli.py
@@ -21,9 +21,7 @@ SPEC = """
 name: {{ name }}
 libraries:
   - glob:
-      include: transformations/**/*.py
-  - glob:
-      include: transformations/**/*.sql
+      include: transformations/**
 """
 
 PYTHON_EXAMPLE = """from pyspark import pipelines as dp
diff --git a/python/pyspark/pipelines/tests/test_cli.py 
b/python/pyspark/pipelines/tests/test_cli.py
index fc238fac1786..fbc6d3a90ac8 100644
--- a/python/pyspark/pipelines/tests/test_cli.py
+++ b/python/pyspark/pipelines/tests/test_cli.py
@@ -240,7 +240,7 @@ class CLIUtilityTests(unittest.TestCase):
             catalog=None,
             database=None,
             configuration={},
-            libraries=[LibrariesGlob(include="subdir1/*")],
+            libraries=[LibrariesGlob(include="subdir1/**")],
         )
         with tempfile.TemporaryDirectory() as temp_dir:
             outer_dir = Path(temp_dir)
@@ -283,7 +283,7 @@ class CLIUtilityTests(unittest.TestCase):
             catalog=None,
             database=None,
             configuration={},
-            libraries=[LibrariesGlob(include="*")],
+            libraries=[LibrariesGlob(include="./**")],
         )
         with tempfile.TemporaryDirectory() as temp_dir:
             outer_dir = Path(temp_dir)
@@ -301,7 +301,7 @@ class CLIUtilityTests(unittest.TestCase):
             catalog=None,
             database=None,
             configuration={},
-            libraries=[LibrariesGlob(include="*")],
+            libraries=[LibrariesGlob(include="./**")],
         )
         with tempfile.TemporaryDirectory() as temp_dir:
             outer_dir = Path(temp_dir)
@@ -451,6 +451,70 @@ class CLIUtilityTests(unittest.TestCase):
         result = parse_table_list("table1, table2 , table3")
         self.assertEqual(result, ["table1", "table2", "table3"])
 
+    def test_valid_glob_patterns(self):
+        """Test that valid glob patterns are accepted."""
+        from pyspark.pipelines.cli import validate_patch_glob_pattern
+
+        cases = {
+            # Simple file paths
+            "src/main.py": "src/main.py",
+            "data/file.sql": "data/file.sql",
+            # Folder paths ending with /** (normalized)
+            "src/**": "src/**/*",
+            "transformations/**": "transformations/**/*",
+            "notebooks/production/**": "notebooks/production/**/*",
+        }
+
+        for pattern, expected in cases.items():
+            with self.subTest(pattern=pattern):
+                self.assertEqual(validate_patch_glob_pattern(pattern), 
expected)
+
+    def test_invalid_glob_patterns(self):
+        """Test that invalid glob patterns are rejected."""
+        from pyspark.pipelines.cli import validate_patch_glob_pattern
+
+        invalid_patterns = [
+            "transformations/**/*.py",
+            "src/**/utils/*.py",
+            "*/main.py",
+            "src/*/test/*.py",
+            "**/*.py",
+            "data/*/file.sql",
+        ]
+
+        for pattern in invalid_patterns:
+            with self.subTest(pattern=pattern):
+                with self.assertRaises(PySparkException) as context:
+                    validate_patch_glob_pattern(pattern)
+                self.assertEqual(
+                    context.exception.getCondition(), 
"PIPELINE_SPEC_INVALID_GLOB_PATTERN"
+                )
+                self.assertEqual(
+                    context.exception.getMessageParameters(), {"glob_pattern": 
pattern}
+                )
+
+    def test_pipeline_spec_with_invalid_glob_pattern(self):
+        """Test that pipeline spec with invalid glob pattern is rejected."""
+        with tempfile.NamedTemporaryFile(mode="w") as tmpfile:
+            tmpfile.write(
+                """
+                {
+                    "name": "test_pipeline",
+                    "libraries": [
+                        {"glob": {"include": "transformations/**/*.py"}}
+                    ]
+                }
+                """
+            )
+            tmpfile.flush()
+            with self.assertRaises(PySparkException) as context:
+                load_pipeline_spec(Path(tmpfile.name))
+            self.assertEqual(context.exception.getCondition(), 
"PIPELINE_SPEC_INVALID_GLOB_PATTERN")
+            self.assertEqual(
+                context.exception.getMessageParameters(),
+                {"glob_pattern": "transformations/**/*.py"},
+            )
+
 
 if __name__ == "__main__":
     try:
diff --git 
a/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/pipelines/EndToEndAPISuite.scala
 
b/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/pipelines/EndToEndAPISuite.scala
index 0901c7ef21c9..923a85cb36f1 100644
--- 
a/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/pipelines/EndToEndAPISuite.scala
+++ 
b/sql/connect/server/src/test/scala/org/apache/spark/sql/connect/pipelines/EndToEndAPISuite.scala
@@ -55,8 +55,13 @@ class EndToEndAPISuite extends PipelineTest with APITest 
with SparkConnectServer
     // Create each source file in the temporary directory
     sources.foreach { file =>
       val filePath = Paths.get(file.name)
-      val fileName = filePath.getFileName.toString
-      val tempFilePath = projectDir.resolve(fileName)
+      val tempFilePath = projectDir.resolve(filePath)
+
+      // Create any necessary parent directories
+      val parentDir = tempFilePath.getParent
+      if (parentDir != null) {
+        Files.createDirectories(parentDir)
+      }
 
       // Create the file with the specified contents
       Files.write(tempFilePath, file.contents.getBytes("UTF-8"))
diff --git 
a/sql/pipelines/src/test/scala/org/apache/spark/sql/pipelines/utils/APITest.scala
 
b/sql/pipelines/src/test/scala/org/apache/spark/sql/pipelines/utils/APITest.scala
index 211deacd9830..efba7aba0a41 100644
--- 
a/sql/pipelines/src/test/scala/org/apache/spark/sql/pipelines/utils/APITest.scala
+++ 
b/sql/pipelines/src/test/scala/org/apache/spark/sql/pipelines/utils/APITest.scala
@@ -95,17 +95,17 @@ trait APITest
   /* SQL Language Tests */
   test("SQL Pipeline with mv, st, and flows") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("mv.sql", "st.sql"))
+      TestPipelineSpec(include = Seq("transformations/mvs/**", 
"transformations/st.sql"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "st.sql",
+        name = "transformations/st.sql",
         contents = s"""
                      |CREATE STREAMING TABLE st;
                      |CREATE FLOW f AS INSERT INTO st BY NAME SELECT * FROM 
STREAM mv WHERE id > 2;
                      |""".stripMargin),
       PipelineSourceFile(
-        name = "mv.sql",
+        name = "transformations/mvs/mv.sql",
         contents = s"""
                      |CREATE MATERIALIZED VIEW mv
                      |AS SELECT * FROM RANGE(5);
@@ -118,11 +118,11 @@ trait APITest
 
   test("SQL Pipeline with CTE") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("*.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE MATERIALIZED VIEW a AS SELECT 1;
                      |CREATE MATERIALIZED VIEW d AS
@@ -143,11 +143,11 @@ trait APITest
 
   test("SQL Pipeline with subquery") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE MATERIALIZED VIEW a AS SELECT * FROM RANGE(5);
                      |CREATE MATERIALIZED VIEW b AS SELECT * FROM RANGE(5)
@@ -161,11 +161,11 @@ trait APITest
 
   test("SQL Pipeline with join") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                   |CREATE TEMPORARY VIEW a AS SELECT id FROM range(1,3);
                   |CREATE TEMPORARY VIEW b AS SELECT id FROM range(1,3);
@@ -180,11 +180,11 @@ trait APITest
 
   test("SQL Pipeline with aggregation") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
          |CREATE MATERIALIZED VIEW a AS SELECT id AS value, (id % 2) AS isOdd 
FROM range(1,10);
          |CREATE MATERIALIZED VIEW b AS SELECT isOdd, max(value) AS
@@ -198,11 +198,11 @@ trait APITest
 
   test("SQL Pipeline with table properties") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
            |CREATE MATERIALIZED VIEW mv TBLPROPERTIES ('prop1'='foo1', 
'prop2'='bar2')
            |AS SELECT 1;
@@ -226,11 +226,11 @@ trait APITest
 
   test("SQL Pipeline with schema") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                |CREATE MATERIALIZED VIEW a (id LONG COMMENT 'comment') AS 
SELECT * FROM RANGE(5);
                |CREATE STREAMING TABLE b (id LONG COMMENT 'comment') AS SELECT 
* FROM STREAM a;
@@ -253,17 +253,17 @@ trait APITest
   /* Mixed Language Tests */
   test("Pipeline with Python and SQL") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql", "definition.py"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE STREAMING TABLE c;
                      |CREATE MATERIALIZED VIEW a AS SELECT * FROM RANGE(5);
                      |""".stripMargin),
       PipelineSourceFile(
-        name = "definition.py",
+        name = "transformations/definition.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -287,11 +287,12 @@ trait APITest
 
   test("Pipeline referencing internal datasets") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("mv.py", "st.py", "definition.sql"))
+      TestPipelineSpec(include =
+        Seq("transformations/mv.py", "transformations/st.py", 
"transformations/definition.sql"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "mv.py",
+        name = "transformations/mv.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -303,7 +304,7 @@ trait APITest
                      |  return spark.range(5)
                      |""".stripMargin),
       PipelineSourceFile(
-        name = "st.py",
+        name = "transformations/st.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -319,7 +320,7 @@ trait APITest
                      |  return spark.readStream.table("src")
                      |""".stripMargin),
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE STREAMING TABLE c;
                      |CREATE FLOW f AS INSERT INTO c BY NAME SELECT * FROM 
STREAM b WHERE id > 2;
@@ -334,14 +335,15 @@ trait APITest
 
   test("Pipeline referencing external datasets") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.py", "definition.sql"))
+      TestPipelineSpec(include =
+        Seq("transformations/definition.py", "transformations/definition.sql"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     spark.sql(
       s"CREATE TABLE src " +
         s"AS SELECT * FROM RANGE(5)")
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.py",
+        name = "transformations/definition.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -357,7 +359,7 @@ trait APITest
                      |  return spark.readStream.table("src")
                      |""".stripMargin),
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE STREAMING TABLE c;
                      |CREATE FLOW f AS INSERT INTO c BY NAME SELECT * FROM 
STREAM b WHERE id > 2;
@@ -373,11 +375,11 @@ trait APITest
   /* Python Language Tests */
   test("Python Pipeline with materialized_view, create_streaming_table, and 
append_flow") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("st.py", "mv.py"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "st.py",
+        name = "transformations/st.py",
         contents = s"""
            |from pyspark import pipelines as dp
            |from pyspark.sql import DataFrame, SparkSession
@@ -395,7 +397,7 @@ trait APITest
            |  return spark.readStream.table("src")
            |""".stripMargin),
       PipelineSourceFile(
-        name = "mv.py",
+        name = "transformations/mv.py",
         contents = s"""
            |from pyspark import pipelines as dp
            |from pyspark.sql import DataFrame, SparkSession
@@ -418,14 +420,14 @@ trait APITest
 
   test("Python Pipeline with temporary_view") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.py"))
+      TestPipelineSpec(include = Seq("transformations/definition.py"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     spark.sql(
       s"CREATE TABLE src " +
         s"AS SELECT * FROM RANGE(5)")
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.py",
+        name = "transformations/definition.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -454,11 +456,11 @@ trait APITest
 
   test("Python Pipeline with partition columns") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("*.py"))
+      TestPipelineSpec(include = Seq("transformations/**"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.py",
+        name = "transformations/definition.py",
         contents = """
                      |from pyspark import pipelines as dp
                      |from pyspark.sql import DataFrame, SparkSession
@@ -489,11 +491,11 @@ trait APITest
 
   test("Pipeline with dry run") {
     val pipelineSpec =
-      TestPipelineSpec(include = Seq("definition.sql"))
+      TestPipelineSpec(include = Seq("transformations/definition.sql"))
     val pipelineConfig = TestPipelineConfiguration(pipelineSpec, dryRun = true)
     val sources = Seq(
       PipelineSourceFile(
-        name = "definition.sql",
+        name = "transformations/definition.sql",
         contents = """
                      |CREATE MATERIALIZED VIEW a AS SELECT * FROM RANGE(5);
                      |CREATE MATERIALIZED VIEW b AS SELECT * FROM a WHERE id > 
2;
@@ -548,7 +550,8 @@ trait APITest
 
   private def runSelectiveRefreshTest(tc: SelectiveRefreshTestCase): Unit = {
     test(tc.name) {
-      val pipelineSpec = TestPipelineSpec(include = Seq("st.sql", "mv.sql"))
+      val pipelineSpec = TestPipelineSpec(include =
+        Seq("transformations/st.sql", "transformations/mv.sql"))
       val externalTable = s"source_data"
       // create initial source table
       spark.sql(s"DROP TABLE IF EXISTS $externalTable")
@@ -556,13 +559,13 @@ trait APITest
 
       val sources = Seq(
         PipelineSourceFile(
-          name = "st.sql",
+          name = "transformations/st.sql",
           contents = s"""
                         |CREATE STREAMING TABLE a AS SELECT * FROM STREAM 
$externalTable;
                         |CREATE STREAMING TABLE b AS SELECT * FROM STREAM 
$externalTable;
                         |""".stripMargin),
         PipelineSourceFile(
-          name = "mv.sql",
+          name = "transformations/mv.sql",
           contents = """
                        |CREATE MATERIALIZED VIEW mv AS SELECT * FROM a;
                        |""".stripMargin))


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to