This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new b0e57453d3a [HUDI-6685] Fix code typo in pyspark 'Insert Overwrite' section of Quick Start Guide. (#9432)
b0e57453d3a is described below

commit b0e57453d3aa1393838e177cfa15a18217da9629
Author: Amrish Lal <amrish.k....@gmail.com>
AuthorDate: Mon Aug 14 19:45:54 2023 -0700

    [HUDI-6685] Fix code typo in pyspark 'Insert Overwrite' section of Quick Start Guide. (#9432)
---
 website/docs/quick-start-guide.md                          | 4 ++--
 website/versioned_docs/version-0.12.0/quick-start-guide.md | 4 ++--
 website/versioned_docs/version-0.12.1/quick-start-guide.md | 4 ++--
 website/versioned_docs/version-0.12.2/quick-start-guide.md | 4 ++--
 website/versioned_docs/version-0.12.3/quick-start-guide.md | 4 ++--
 website/versioned_docs/version-0.13.0/quick-start-guide.md | 4 ++--
 website/versioned_docs/version-0.13.1/quick-start-guide.md | 4 ++--
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/website/docs/quick-start-guide.md 
b/website/docs/quick-start-guide.md
index 4e6a6e55e5c..3cad1cadc3e 100644
--- a/website/docs/quick-start-guide.md
+++ b/website/docs/quick-start-guide.md
@@ -1573,11 +1573,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.12.0/quick-start-guide.md 
b/website/versioned_docs/version-0.12.0/quick-start-guide.md
index 9a18bcf358e..73df9aac567 100644
--- a/website/versioned_docs/version-0.12.0/quick-start-guide.md
+++ b/website/versioned_docs/version-0.12.0/quick-start-guide.md
@@ -1443,11 +1443,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.12.1/quick-start-guide.md 
b/website/versioned_docs/version-0.12.1/quick-start-guide.md
index 8f5fc45cd3d..60658958a60 100644
--- a/website/versioned_docs/version-0.12.1/quick-start-guide.md
+++ b/website/versioned_docs/version-0.12.1/quick-start-guide.md
@@ -1443,11 +1443,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.12.2/quick-start-guide.md 
b/website/versioned_docs/version-0.12.2/quick-start-guide.md
index e0f3e60554d..0a4eda6cbe0 100644
--- a/website/versioned_docs/version-0.12.2/quick-start-guide.md
+++ b/website/versioned_docs/version-0.12.2/quick-start-guide.md
@@ -1475,11 +1475,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.12.3/quick-start-guide.md 
b/website/versioned_docs/version-0.12.3/quick-start-guide.md
index f21a01bd8ac..0df6150d905 100644
--- a/website/versioned_docs/version-0.12.3/quick-start-guide.md
+++ b/website/versioned_docs/version-0.12.3/quick-start-guide.md
@@ -1475,11 +1475,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.13.0/quick-start-guide.md 
b/website/versioned_docs/version-0.13.0/quick-start-guide.md
index f1b54bc5e04..453090dcdc0 100644
--- a/website/versioned_docs/version-0.13.0/quick-start-guide.md
+++ b/website/versioned_docs/version-0.13.0/quick-start-guide.md
@@ -1474,11 +1474,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \
diff --git a/website/versioned_docs/version-0.13.1/quick-start-guide.md 
b/website/versioned_docs/version-0.13.1/quick-start-guide.md
index 889903bde91..045707abf77 100644
--- a/website/versioned_docs/version-0.13.1/quick-start-guide.md
+++ b/website/versioned_docs/version-0.13.1/quick-start-guide.md
@@ -1474,11 +1474,11 @@ spark.
 
 ```python
 # pyspark
-self.spark.read.format("hudi"). \
+spark.read.format("hudi"). \
     load(basePath). \
     select(["uuid", "partitionpath"]). \
     sort(["partitionpath", "uuid"]). \
-    show(n=100, truncate=False) \
+    show(n=100, truncate=False)
     
 inserts = 
sc._jvm.org.apache.hudi.QuickstartUtils.convertToStringList(dataGen.generateInserts(10))
 
 df = spark.read.json(spark.sparkContext.parallelize(inserts, 2)). \

Reply via email to