This is an automated email from the ASF dual-hosted git repository.

hvanhovell pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new 57554e3f081 [SPARK-44713][CONNECT][SQL] Move shared classes to sql/api
57554e3f081 is described below

commit 57554e3f081bbfa94ed5b46ff72616cc18e59da1
Author: Herman van Hovell <her...@databricks.com>
AuthorDate: Tue Aug 8 15:04:07 2023 +0200

    [SPARK-44713][CONNECT][SQL] Move shared classes to sql/api
    
    ### What changes were proposed in this pull request?
    This PR deduplicates the following classes:
    - `org.apache.spark.sql.SaveMode`
    - `org.apache.spark.api.java.function.FlatMapGroupsWithStateFunction`
    - `org.apache.spark.api.java.function.MapGroupsWithStateFunction`
    - `org.apache.spark.sql.streaming.GroupState`
    
    These classes were all duplicated in the Scala client. I have moved the
original versions to `sql/api` and removed the Connect equivalents.
    
    ### Why are the changes needed?
    Maintaining duplicate copies of these classes is error-prone; moving them
to `sql/api` gives a single shared implementation for both the Scala client
and `sql/core`.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    Compilation.
    
    Closes #42386 from hvanhovell/SPARK-44713.
    
    Authored-by: Herman van Hovell <her...@databricks.com>
    Signed-off-by: Herman van Hovell <her...@databricks.com>
    (cherry picked from commit c46d4caa59865e9b99e02f6adc79f49f9ebc8f7f)
    Signed-off-by: Herman van Hovell <her...@databricks.com>
---
 .../main/java/org/apache/spark/sql/SaveMode.java   |  58 ----
 .../function/FlatMapGroupsWithStateFunction.java   |  39 ---
 .../java/function/MapGroupsWithStateFunction.java  |  38 ---
 .../apache/spark/sql/streaming/GroupState.scala    | 336 ---------------------
 project/MimaExcludes.scala                         |  13 +-
 .../function/FlatMapGroupsWithStateFunction.java   |   0
 .../java/function/MapGroupsWithStateFunction.java  |   0
 .../main/java/org/apache/spark/sql/SaveMode.java   |   0
 .../apache/spark/sql/streaming/GroupState.scala    |   0
 9 files changed, 6 insertions(+), 478 deletions(-)

diff --git 
a/connector/connect/client/jvm/src/main/java/org/apache/spark/sql/SaveMode.java 
b/connector/connect/client/jvm/src/main/java/org/apache/spark/sql/SaveMode.java
deleted file mode 100644
index 95af157687c..00000000000
--- 
a/connector/connect/client/jvm/src/main/java/org/apache/spark/sql/SaveMode.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql;
-
-import org.apache.spark.annotation.Stable;
-
-/**
- * SaveMode is used to specify the expected behavior of saving a DataFrame to 
a data source.
- *
- * @since 3.4.0
- */
-@Stable
-public enum SaveMode {
-  /**
-   * Append mode means that when saving a DataFrame to a data source, if 
data/table already exists,
-   * contents of the DataFrame are expected to be appended to existing data.
-   *
-   * @since 3.4.0
-   */
-  Append,
-  /**
-   * Overwrite mode means that when saving a DataFrame to a data source,
-   * if data/table already exists, existing data is expected to be overwritten 
by the contents of
-   * the DataFrame.
-   *
-   * @since 3.4.0
-   */
-  Overwrite,
-  /**
-   * ErrorIfExists mode means that when saving a DataFrame to a data source, 
if data already exists,
-   * an exception is expected to be thrown.
-   *
-   * @since 3.4.0
-   */
-  ErrorIfExists,
-  /**
-   * Ignore mode means that when saving a DataFrame to a data source, if data 
already exists,
-   * the save operation is expected to not save the contents of the DataFrame 
and to not
-   * change the existing data.
-   *
-   * @since 3.4.0
-   */
-  Ignore
-}
diff --git 
a/connector/connect/common/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
 
b/connector/connect/common/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
deleted file mode 100644
index c917c8d28be..00000000000
--- 
a/connector/connect/common/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.api.java.function;
-
-import java.io.Serializable;
-import java.util.Iterator;
-
-import org.apache.spark.annotation.Evolving;
-import org.apache.spark.annotation.Experimental;
-import org.apache.spark.sql.streaming.GroupState;
-
-/**
- * ::Experimental::
- * Base interface for a map function used in
- * {@code org.apache.spark.sql.KeyValueGroupedDataset.flatMapGroupsWithState(
- * FlatMapGroupsWithStateFunction, org.apache.spark.sql.streaming.OutputMode,
- * org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}
- * @since 3.5.0
- */
-@Experimental
-@Evolving
-public interface FlatMapGroupsWithStateFunction<K, V, S, R> extends 
Serializable {
-    Iterator<R> call(K key, Iterator<V> values, GroupState<S> state) throws 
Exception;
-}
diff --git 
a/connector/connect/common/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
 
b/connector/connect/common/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
deleted file mode 100644
index ae179ad7d27..00000000000
--- 
a/connector/connect/common/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.api.java.function;
-
-import java.io.Serializable;
-import java.util.Iterator;
-
-import org.apache.spark.annotation.Evolving;
-import org.apache.spark.annotation.Experimental;
-import org.apache.spark.sql.streaming.GroupState;
-
-/**
- * ::Experimental::
- * Base interface for a map function used in
- * {@code org.apache.spark.sql.KeyValueGroupedDataset.mapGroupsWithState(
- * MapGroupsWithStateFunction, org.apache.spark.sql.Encoder, 
org.apache.spark.sql.Encoder)}
- * @since 3.5.0
- */
-@Experimental
-@Evolving
-public interface MapGroupsWithStateFunction<K, V, S, R> extends Serializable {
-    R call(K key, Iterator<V> values, GroupState<S> state) throws Exception;
-}
diff --git 
a/connector/connect/common/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
 
b/connector/connect/common/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
deleted file mode 100644
index bd418a89534..00000000000
--- 
a/connector/connect/common/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql.streaming
-
-import org.apache.spark.annotation.{Evolving, Experimental}
-import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
-
-/**
- * :: Experimental ::
- *
- * Wrapper class for interacting with per-group state data in 
`mapGroupsWithState` and
- * `flatMapGroupsWithState` operations on `KeyValueGroupedDataset`.
- *
- * Detail description on `[map/flatMap]GroupsWithState` operation
- * -------------------------------------------------------------- Both, 
`mapGroupsWithState` and
- * `flatMapGroupsWithState` in `KeyValueGroupedDataset` will invoke the 
user-given function on
- * each group (defined by the grouping function in `Dataset.groupByKey()`) 
while maintaining a
- * user-defined per-group state between invocations. For a static batch 
Dataset, the function will
- * be invoked once per group. For a streaming Dataset, the function will be 
invoked for each group
- * repeatedly in every trigger. That is, in every batch of the 
`StreamingQuery`, the function will
- * be invoked once for each group that has data in the trigger. Furthermore, 
if timeout is set,
- * then the function will be invoked on timed-out groups (more detail below).
- *
- * The function is invoked with the following parameters.
- *   - The key of the group.
- *   - An iterator containing all the values for this group.
- *   - A user-defined state object set by previous invocations of the given 
function.
- *
- * In case of a batch Dataset, there is only one invocation and the state 
object will be empty as
- * there is no prior state. Essentially, for batch Datasets, 
`[map/flatMap]GroupsWithState` is
- * equivalent to `[map/flatMap]Groups` and any updates to the state and/or 
timeouts have no
- * effect.
- *
- * The major difference between `mapGroupsWithState` and 
`flatMapGroupsWithState` is that the
- * former allows the function to return one and only one record, whereas the 
latter allows the
- * function to return any number of records (including no records). 
Furthermore, the
- * `flatMapGroupsWithState` is associated with an operation output mode, which 
can be either
- * `Append` or `Update`. Semantically, this defines whether the output records 
of one trigger is
- * effectively replacing the previously output records (from previous 
triggers) or is appending to
- * the list of previously output records. Essentially, this defines how the 
Result Table (refer to
- * the semantics in the programming guide) is updated, and allows us to reason 
about the semantics
- * of later operations.
- *
- * Important points to note about the function (both mapGroupsWithState and
- * flatMapGroupsWithState).
- *   - In a trigger, the function will be called only the groups present in 
the batch. So do not
- *     assume that the function will be called in every trigger for every 
group that has state.
- *   - There is no guaranteed ordering of values in the iterator in the 
function, neither with
- *     batch, nor with streaming Datasets.
- *   - All the data will be shuffled before applying the function.
- *   - If timeout is set, then the function will also be called with no 
values. See more details
- *     on `GroupStateTimeout` below.
- *
- * Important points to note about using `GroupState`.
- *   - The value of the state cannot be null. So updating state with null will 
throw
- *     `IllegalArgumentException`.
- *   - Operations on `GroupState` are not thread-safe. This is to avoid memory 
barriers.
- *   - If `remove()` is called, then `exists()` will return `false`, `get()` 
will throw
- *     `NoSuchElementException` and `getOption()` will return `None`
- *   - After that, if `update(newState)` is called, then `exists()` will again 
return `true`,
- *     `get()` and `getOption()`will return the updated value.
- *
- * Important points to note about using `GroupStateTimeout`.
- *   - The timeout type is a global param across all the groups (set as 
`timeout` param in
- *     `[map|flatMap]GroupsWithState`, but the exact timeout 
duration/timestamp is configurable
- *     per group by calling `setTimeout...()` in `GroupState`.
- *   - Timeouts can be either based on processing time (i.e.
- *     `GroupStateTimeout.ProcessingTimeTimeout`) or event time (i.e.
- *     `GroupStateTimeout.EventTimeTimeout`).
- *   - With `ProcessingTimeTimeout`, the timeout duration can be set by calling
- *     `GroupState.setTimeoutDuration`. The timeout will occur when the clock 
has advanced by the
- *     set duration. Guarantees provided by this timeout with a duration of D 
ms are as follows:
- *     - Timeout will never occur before the clock time has advanced by D ms
- *     - Timeout will occur eventually when there is a trigger in the query 
(i.e. after D ms). So
- *       there is no strict upper bound on when the timeout would occur. For 
example, the trigger
- *       interval of the query will affect when the timeout actually occurs. 
If there is no data
- *       in the stream (for any group) for a while, then there will not be any 
trigger and timeout
- *       function call will not occur until there is data.
- *     - Since the processing time timeout is based on the clock time, it is 
affected by the
- *       variations in the system clock (i.e. time zone changes, clock skew, 
etc.).
- *   - With `EventTimeTimeout`, the user also has to specify the event time 
watermark in the query
- *     using `Dataset.withWatermark()`. With this setting, data that is older 
than the watermark
- *     is filtered out. The timeout can be set for a group by setting a 
timeout timestamp
- *     using`GroupState.setTimeoutTimestamp()`, and the timeout would occur 
when the watermark
- *     advances beyond the set timestamp. You can control the timeout delay by 
two parameters -
- *     (i) watermark delay and an additional duration beyond the timestamp in 
the event (which is
- *     guaranteed to be newer than watermark due to the filtering). Guarantees 
provided by this
- *     timeout are as follows:
- *     - Timeout will never occur before the watermark has exceeded the set 
timeout.
- *     - Similar to processing time timeouts, there is no strict upper bound 
on the delay when the
- *       timeout actually occurs. The watermark can advance only when there is 
data in the stream
- *       and the event time of the data has actually advanced.
- *   - When the timeout occurs for a group, the function is called for that 
group with no values,
- *     and `GroupState.hasTimedOut()` set to true.
- *   - The timeout is reset every time the function is called on a group, that 
is, when the group
- *     has new data, or the group has timed out. So the user has to set the 
timeout duration every
- *     time the function is called, otherwise, there will not be any timeout 
set.
- *
- * `[map/flatMap]GroupsWithState` can take a user defined initial state as an 
additional argument.
- * This state will be applied when the first batch of the streaming query is 
processed. If there
- * are no matching rows in the data for the keys present in the initial state, 
the state is still
- * applied and the function will be invoked with the values being an empty 
iterator.
- *
- * Scala example of using GroupState in `mapGroupsWithState`:
- * {{{
- * // A mapping function that maintains an integer state for string keys and 
returns a string.
- * // Additionally, it sets a timeout to remove the state if it has not 
received data for an hour.
- * def mappingFunction(key: String, value: Iterator[Int], state: 
GroupState[Int]): String = {
- *
- *   if (state.hasTimedOut) {                // If called when timing out, 
remove the state
- *     state.remove()
- *
- *   } else if (state.exists) {              // If state exists, use it for 
processing
- *     val existingState = state.get         // Get the existing state
- *     val shouldRemove = ...                // Decide whether to remove the 
state
- *     if (shouldRemove) {
- *       state.remove()                      // Remove the state
- *
- *     } else {
- *       val newState = ...
- *       state.update(newState)              // Set the new state
- *       state.setTimeoutDuration("1 hour")  // Set the timeout
- *     }
- *
- *   } else {
- *     val initialState = ...
- *     state.update(initialState)            // Set the initial state
- *     state.setTimeoutDuration("1 hour")    // Set the timeout
- *   }
- *   ...
- *   // return something
- * }
- *
- * dataset
- *   .groupByKey(...)
- *   
.mapGroupsWithState(GroupStateTimeout.ProcessingTimeTimeout)(mappingFunction)
- * }}}
- *
- * Java example of using `GroupState`:
- * {{{
- * // A mapping function that maintains an integer state for string keys and 
returns a string.
- * // Additionally, it sets a timeout to remove the state if it has not 
received data for an hour.
- * MapGroupsWithStateFunction<String, Integer, Integer, String> 
mappingFunction =
- *    new MapGroupsWithStateFunction<String, Integer, Integer, String>() {
- *
- *      @Override
- *      public String call(String key, Iterator<Integer> value, 
GroupState<Integer> state) {
- *        if (state.hasTimedOut()) {            // If called when timing out, 
remove the state
- *          state.remove();
- *
- *        } else if (state.exists()) {            // If state exists, use it 
for processing
- *          int existingState = state.get();      // Get the existing state
- *          boolean shouldRemove = ...;           // Decide whether to remove 
the state
- *          if (shouldRemove) {
- *            state.remove();                     // Remove the state
- *
- *          } else {
- *            int newState = ...;
- *            state.update(newState);             // Set the new state
- *            state.setTimeoutDuration("1 hour"); // Set the timeout
- *          }
- *
- *        } else {
- *          int initialState = ...;               // Set the initial state
- *          state.update(initialState);
- *          state.setTimeoutDuration("1 hour");   // Set the timeout
- *        }
- *        ...
- *         // return something
- *      }
- *    };
- *
- * dataset
- *     .groupByKey(...)
- *     .mapGroupsWithState(
- *         mappingFunction, Encoders.INT, Encoders.STRING, 
GroupStateTimeout.ProcessingTimeTimeout);
- * }}}
- *
- * @tparam S
- *   User-defined type of the state to be stored for each group. Must be 
encodable into Spark SQL
- *   types (see `Encoder` for more details).
- * @since 3.5.0
- */
-@Experimental
-@Evolving
-trait GroupState[S] extends LogicalGroupState[S] {
-
-  /** Whether state exists or not. */
-  def exists: Boolean
-
-  /** Get the state value if it exists, or throw NoSuchElementException. */
-  @throws[NoSuchElementException]("when state does not exist")
-  def get: S
-
-  /** Get the state value as a scala Option. */
-  def getOption: Option[S]
-
-  /** Update the value of the state. */
-  def update(newState: S): Unit
-
-  /** Remove this state. */
-  def remove(): Unit
-
-  /**
-   * Whether the function has been called because the key has timed out.
-   * @note
-   *   This can return true only when timeouts are enabled in 
`[map/flatMap]GroupsWithState`.
-   */
-  def hasTimedOut: Boolean
-
-  /**
-   * Set the timeout duration in ms for this key.
-   *
-   * @note
-   *   [[GroupStateTimeout Processing time timeout]] must be enabled in
-   *   `[map/flatMap]GroupsWithState` for calling this method.
-   * @note
-   *   This method has no effect when used in a batch query.
-   */
-  @throws[IllegalArgumentException]("if 'durationMs' is not positive")
-  @throws[UnsupportedOperationException](
-    "if processing time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutDuration(durationMs: Long): Unit
-
-  /**
-   * Set the timeout duration for this key as a string. For example, "1 hour", 
"2 days", etc.
-   *
-   * @note
-   *   [[GroupStateTimeout Processing time timeout]] must be enabled in
-   *   `[map/flatMap]GroupsWithState` for calling this method.
-   * @note
-   *   This method has no effect when used in a batch query.
-   */
-  @throws[IllegalArgumentException]("if 'duration' is not a valid duration")
-  @throws[UnsupportedOperationException](
-    "if processing time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutDuration(duration: String): Unit
-
-  /**
-   * Set the timeout timestamp for this key as milliseconds in epoch time. 
This timestamp cannot
-   * be older than the current watermark.
-   *
-   * @note
-   *   [[GroupStateTimeout Event time timeout]] must be enabled in 
`[map/flatMap]GroupsWithState`
-   *   for calling this method.
-   * @note
-   *   This method has no effect when used in a batch query.
-   */
-  @throws[IllegalArgumentException](
-    "if 'timestampMs' is not positive or less than the current watermark in a 
streaming query")
-  @throws[UnsupportedOperationException](
-    "if event time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutTimestamp(timestampMs: Long): Unit
-
-  /**
-   * Set the timeout timestamp for this key as milliseconds in epoch time and 
an additional
-   * duration as a string (e.g. "1 hour", "2 days", etc.). The final timestamp 
(including the
-   * additional duration) cannot be older than the current watermark.
-   *
-   * @note
-   *   [[GroupStateTimeout Event time timeout]] must be enabled in 
`[map/flatMap]GroupsWithState`
-   *   for calling this method.
-   * @note
-   *   This method has no side effect when used in a batch query.
-   */
-  @throws[IllegalArgumentException](
-    "if 'additionalDuration' is invalid or the final timeout timestamp is less 
than " +
-      "the current watermark in a streaming query")
-  @throws[UnsupportedOperationException](
-    "if event time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit
-
-  /**
-   * Set the timeout timestamp for this key as a java.sql.Date. This timestamp 
cannot be older
-   * than the current watermark.
-   *
-   * @note
-   *   [[GroupStateTimeout Event time timeout]] must be enabled in 
`[map/flatMap]GroupsWithState`
-   *   for calling this method.
-   * @note
-   *   This method has no side effect when used in a batch query.
-   */
-  @throws[UnsupportedOperationException](
-    "if event time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutTimestamp(timestamp: java.sql.Date): Unit
-
-  /**
-   * Set the timeout timestamp for this key as a java.sql.Date and an 
additional duration as a
-   * string (e.g. "1 hour", "2 days", etc.). The final timestamp (including 
the additional
-   * duration) cannot be older than the current watermark.
-   *
-   * @note
-   *   [[GroupStateTimeout Event time timeout]] must be enabled in 
`[map/flatMap]GroupsWithState`
-   *   for calling this method.
-   * @note
-   *   This method has no side effect when used in a batch query.
-   */
-  @throws[IllegalArgumentException]("if 'additionalDuration' is invalid")
-  @throws[UnsupportedOperationException](
-    "if event time timeout has not been enabled in 
[map|flatMap]GroupsWithState")
-  def setTimeoutTimestamp(timestamp: java.sql.Date, additionalDuration: 
String): Unit
-
-  /**
-   * Get the current event time watermark as milliseconds in epoch time.
-   *
-   * @note
-   *   In a streaming query, this can be called only when watermark is set 
before calling
-   *   `[map/flatMap]GroupsWithState`. In a batch query, this method always 
returns -1.
-   */
-  @throws[UnsupportedOperationException](
-    "if watermark has not been set before in [map|flatMap]GroupsWithState")
-  def getCurrentWatermarkMs(): Long
-
-  /**
-   * Get the current processing time as milliseconds in epoch time.
-   * @note
-   *   In a streaming query, this will return a constant value throughout the 
duration of a
-   *   trigger, even if the trigger is re-executed.
-   */
-  def getCurrentProcessingTimeMs(): Long
-}
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index 3848caacbd2..9805ad7f09d 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -54,12 +54,6 @@ object MimaExcludes {
     
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.listTables"),
     // [SPARK-43992][SQL][PYTHON][CONNECT] Add optional pattern for 
Catalog.listFunctions
     
ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.catalog.Catalog.listFunctions"),
-     // [SPARK-43919][SQL] Extract JSON functionality out of Row
-    
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.Row.json"),
-    
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.Row.prettyJson"),
-    
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.expressions.MutableAggregationBuffer.json"),
-    
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.expressions.MutableAggregationBuffer.prettyJson"),
-    
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.expressions.MutableAggregationBuffer.jsonValue"),
     // [SPARK-43952][CORE][CONNECT][SQL] Add SparkContext APIs for query 
cancellation by tag
     
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.JobData.this"),
     // [SPARK-44205][SQL] Extract Catalyst Code from DecimalType
@@ -73,7 +67,12 @@ object MimaExcludes {
     
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.GroupStateTimeout"),
     
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.OutputMode"),
     // [SPARK-44692][CONNECT][SQL] Move Trigger(s) to sql/api
-    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.Trigger")
+    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.Trigger"),
+    // [SPARK-44713][CONNECT][SQL] Move shared classes to sql/api
+    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.api.java.function.FlatMapGroupsWithStateFunction"),
+    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.api.java.function.MapGroupsWithStateFunction"),
+    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SaveMode"),
+    
ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.streaming.GroupState")
   )
 
   // Default exclude rules
diff --git 
a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
 
b/sql/api/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
similarity index 100%
rename from 
sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
rename to 
sql/api/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
diff --git 
a/sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
 
b/sql/api/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
similarity index 100%
rename from 
sql/core/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
rename to 
sql/api/src/main/java/org/apache/spark/api/java/function/MapGroupsWithStateFunction.java
diff --git a/sql/core/src/main/java/org/apache/spark/sql/SaveMode.java 
b/sql/api/src/main/java/org/apache/spark/sql/SaveMode.java
similarity index 100%
rename from sql/core/src/main/java/org/apache/spark/sql/SaveMode.java
rename to sql/api/src/main/java/org/apache/spark/sql/SaveMode.java
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala 
b/sql/api/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
similarity index 100%
rename from 
sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
rename to sql/api/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to