This is an automated email from the ASF dual-hosted git repository.

curth pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-adbc.git


The following commit(s) were added to refs/heads/main by this push:
     new e93d18132 feat(csharp/src/Drivers/Databricks): Remove redundant
CloseOperation (#3093)
e93d18132 is described below

commit e93d18132f83a67695acd5cb40ca419560a06d89
Author: Todd Meng <todd.m...@databricks.com>
AuthorDate: Thu Jul 3 12:34:55 2025 -0700

    feat(csharp/src/Drivers/Databricks): Remove redundant CloseOperation (#3093)
    
    The driver was breaking with DBR 11.3.
    
    [This PR introduced
    logic](https://github.com/apache/arrow-adbc/commit/cde9e7b525b37dfdbde27a93d57ac214ea0701bb#diff-ea22827c908183e8cae28e5fcc37cef9d0c4b5637474262cffe163b6b78db9f5R386) which
    throws errors via HandleThriftResponse. Previously, we had been silently
    ignoring these CloseOperation failures; these errors occurred because
    the operations can be closed when returning via DirectResults, and we
    weren't properly checking for DirectResults.CloseOperation status for
    Metadata queries. With this change, we avoid making redundant
    CloseOperation requests upon dispose.
    
    1. Metadata Request to server
    2. Metadata Response with DirectResults and CloseOperation
    3. Statement.Dispose
    4. DirectResults includes CloseOperation
    5. Skip redundant CloseOperation
    
    
    ### Testing
    Added a test that only passes with the changes in this PR in DBR 11.3
---
 .../Drivers/Apache/Hive2/HiveServer2Statement.cs   |  3 +++
 csharp/test/Drivers/Databricks/StatementTests.cs   | 22 ++++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/csharp/src/Drivers/Apache/Hive2/HiveServer2Statement.cs 
b/csharp/src/Drivers/Apache/Hive2/HiveServer2Statement.cs
index 05379631a..d25ae4fb1 100644
--- a/csharp/src/Drivers/Apache/Hive2/HiveServer2Statement.cs
+++ b/csharp/src/Drivers/Apache/Hive2/HiveServer2Statement.cs
@@ -566,6 +566,9 @@ namespace Apache.Arrow.Adbc.Drivers.Apache.Hive2
 
         private async Task<QueryResult> GetQueryResult(TSparkDirectResults? 
directResults, CancellationToken cancellationToken)
         {
+            // Set _directResults so that dispose logic can check if operation 
was already closed
+            _directResults = directResults;
+
             Schema schema;
             if (Connection.AreResultsAvailableDirectly && 
directResults?.ResultSet?.Results != null)
             {
diff --git a/csharp/test/Drivers/Databricks/StatementTests.cs 
b/csharp/test/Drivers/Databricks/StatementTests.cs
index ef1575156..e27e145f6 100644
--- a/csharp/test/Drivers/Databricks/StatementTests.cs
+++ b/csharp/test/Drivers/Databricks/StatementTests.cs
@@ -131,6 +131,28 @@ namespace Apache.Arrow.Adbc.Tests.Drivers.Databricks
             await 
base.CanGetCrossReferenceFromChildTable(TestConfiguration.Metadata.Catalog, 
TestConfiguration.Metadata.Schema);
         }
 
+        /// <summary>
+        /// Verifies that Dispose() can be called on metadata query statements 
without throwing
+        /// "Invalid OperationHandle" errors. This tests the fix for the issue 
where the server
+        /// auto-closes operations but the client still tries to close them 
during disposal.
+        /// </summary>
+        [SkippableFact]
+        public async Task CanDisposeMetadataQueriesWithoutError()
+        {
+            // Test a simple metadata command that's most likely to trigger 
the issue
+            var statement = Connection.CreateStatement();
+            statement.SetOption(ApacheParameters.IsMetadataCommand, "true");
+            statement.SqlQuery = "GetSchemas";
+
+            // Execute the metadata query
+            QueryResult queryResult = await statement.ExecuteQueryAsync();
+            Assert.NotNull(queryResult.Stream);
+
+            // This should not throw "Invalid OperationHandle" errors
+            // The fix ensures _directResults is set so dispose logic works 
correctly
+            statement.Dispose();
+        }
+
         [SkippableFact]
         public async Task CanGetColumnsWithBaseTypeName()
         {

Reply via email to