This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-4.1
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-4.1 by this push:
     new a49ffded2ad4 [SPARK-54376][SDP] Mark most pipeline configuration 
options as internal
a49ffded2ad4 is described below

commit a49ffded2ad4a6b578b01e3d2d2bd1c2baeb3ca4
Author: Sandy Ryza <[email protected]>
AuthorDate: Mon Nov 17 08:25:10 2025 -0800

    [SPARK-54376][SDP] Mark most pipeline configuration options as internal
    
    ### What changes were proposed in this pull request?
    
    Marks all declarative pipelines configuration options as internal, except 
for `spark.sql.pipelines.maxFlowRetryAttempts`.
    
    ### Why are the changes needed?
    
    When implementing Declarative Pipelines, we made several quantities 
configurable. However, documented configurations are essentially public APIs, 
and it is too early to commit to supporting all of these yet. We should mark 
most of them as internal, except where we think users will really need them.
    
    ### Does this PR introduce _any_ user-facing change?
    
    Yes, to unreleased software.
    
    ### How was this patch tested?
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    Closes #53090 from sryza/internal-configs.
    
    Authored-by: Sandy Ryza <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
    (cherry picked from commit 1db267e3bd02003d2f88a97f67509642c50f6bd0)
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../src/main/scala/org/apache/spark/sql/internal/SQLConf.scala      | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 3ec8366f8141..cda75f22323e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -6439,6 +6439,7 @@ object SQLConf {
 
   val PIPELINES_STREAM_STATE_POLLING_INTERVAL = {
     buildConf("spark.sql.pipelines.execution.streamstate.pollingInterval")
+      .internal()
       .doc(
         "Interval in seconds at which the stream state is polled for changes. 
This is used to " +
           "check if the stream has failed and needs to be restarted."
@@ -6450,6 +6451,7 @@ object SQLConf {
 
   val PIPELINES_WATCHDOG_MIN_RETRY_TIME_IN_SECONDS = {
     buildConf("spark.sql.pipelines.execution.watchdog.minRetryTime")
+      .internal()
       .doc(
         "Initial duration in seconds between the time when we notice a flow 
has failed and " +
           "when we try to restart the flow. The interval between flow restarts 
doubles with " +
@@ -6464,6 +6466,7 @@ object SQLConf {
 
   val PIPELINES_WATCHDOG_MAX_RETRY_TIME_IN_SECONDS = {
     buildConf("spark.sql.pipelines.execution.watchdog.maxRetryTime")
+      .internal()
       .doc(
         "Maximum time interval in seconds at which flows will be restarted."
       )
@@ -6474,6 +6477,7 @@ object SQLConf {
 
   val PIPELINES_MAX_CONCURRENT_FLOWS = {
     buildConf("spark.sql.pipelines.execution.maxConcurrentFlows")
+      .internal()
       .doc(
         "Max number of flows to execute at once. Used to tune performance for 
triggered " +
           "pipelines. Has no effect on continuous pipelines."
@@ -6486,6 +6490,7 @@ object SQLConf {
 
   val PIPELINES_TIMEOUT_MS_FOR_TERMINATION_JOIN_AND_LOCK = {
     buildConf("spark.sql.pipelines.timeoutMsForTerminationJoinAndLock")
+      .internal()
       .doc("Timeout in milliseconds to grab a lock for stopping update - 
default is 1hr.")
       .version("4.1.0")
       .timeConf(TimeUnit.MILLISECONDS)
@@ -6503,6 +6508,7 @@ object SQLConf {
 
   val PIPELINES_EVENT_QUEUE_CAPACITY = {
     buildConf("spark.sql.pipelines.event.queue.capacity")
+      .internal()
       .doc("Capacity of the event queue used in pipelined execution. When the 
queue is full, " +
         "non-terminal FlowProgressEvents will be dropped.")
       .version("4.1.0")


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to