This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 14d9f3572e5 HIVE-22810: Add drop scheduled query IF EXISTS support 
(#5920)
14d9f3572e5 is described below

commit 14d9f3572e57c0203d8b6461e98d868ea1ba7b87
Author: Neeraj Khatri <[email protected]>
AuthorDate: Sat Jul 5 03:11:35 2025 +0530

    HIVE-22810: Add drop scheduled query IF EXISTS support (#5920)
---
 .../java/org/apache/hadoop/hive/ql/ErrorMsg.java   |  1 +
 .../org/apache/hadoop/hive/ql/parse/HiveParser.g   |  3 +-
 .../hadoop/hive/ql/parse/TestParseDriver.java      |  1 +
 .../hive/ql/parse/ScheduledQueryAnalyzer.java      | 26 +++++--
 .../hive/ql/schq/TestScheduledQueryStatements.java | 80 ++++++++++++++++++++--
 5 files changed, 101 insertions(+), 10 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index e6f893edcf4..148932b2529 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -498,6 +498,7 @@ public enum ErrorMsg {
   ALTER_TABLE_COMPACTION_NON_PARTITIONED_COLUMN_NOT_ALLOWED(10443, "Filter 
expression can contain only partition columns."),
   CATALOG_ALREADY_EXISTS(10444, "Catalog {0} already exists", true),
   CATALOG_NOT_EXISTS(10445, "Catalog {0} does not exists:", true),
+  INVALID_SCHEDULED_QUERY(10446, "Scheduled query {0} does not exist", true),
 
   //========================== 20000 range starts here 
========================//
 
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g 
b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index f0efde1f027..713c5c7248e 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1745,9 +1745,10 @@ createScheduledQueryStatement
 dropScheduledQueryStatement
 @init { pushMsg("drop scheduled query statement", state); }
 @after { popMsg(state); }
-    : KW_DROP KW_SCHEDULED KW_QUERY name=identifier
+    : KW_DROP KW_SCHEDULED KW_QUERY ifExists? name=identifier
     -> ^(TOK_DROP_SCHEDULED_QUERY
             $name
+            ifExists?
         )
     ;
 
diff --git 
a/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDriver.java 
b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDriver.java
index f2d90d4f190..b5455531480 100644
--- a/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDriver.java
+++ b/parser/src/test/org/apache/hadoop/hive/ql/parse/TestParseDriver.java
@@ -333,6 +333,7 @@ public void testParseAlterScheduledQuery() throws Exception 
{
   @Test
   public void testParseDropScheduledQuery() throws Exception {
     parseDriver.parse("drop scheduled query asd");
+    parseDriver.parse("drop scheduled query if exists asd");
   }
 
 }
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
index 58d813771a2..05b2cd0b44f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ScheduledQueryAnalyzer.java
@@ -29,11 +29,13 @@
 import org.apache.hadoop.hive.common.type.Timestamp;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.ScheduledQuery;
 import org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields;
 import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey;
 import 
org.apache.hadoop.hive.metastore.api.ScheduledQueryMaintenanceRequestType;
 import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
@@ -74,7 +76,16 @@ public void analyzeInternal(ASTNode ast) throws 
SemanticException {
     ScheduledQueryMaintenanceWork work;
     ScheduledQueryMaintenanceRequestType type = 
translateAstType(ast.getToken().getType());
     ScheduledQuery parsedSchq = interpretAstNode(ast);
-    ScheduledQuery schq = fillScheduledQuery(type, parsedSchq);
+    boolean throwException = true;
+    if (type == ScheduledQueryMaintenanceRequestType.DROP) {
+      boolean ifExists = ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != 
null;
+      throwException = !ifExists && !HiveConf.getBoolVar(conf, 
ConfVars.DROP_IGNORES_NON_EXISTENT);
+    }
+    ScheduledQuery schq = fillScheduledQuery(type, parsedSchq, throwException);
+    if (schq == null) {
+      LOG.warn("Unable to find Scheduled query " + 
parsedSchq.getScheduleKey().getScheduleName());
+      return;
+    }
     checkAuthorization(type, schq);
     LOG.info("scheduled query operation: " + type + " " + schq);
     try {
@@ -87,8 +98,8 @@ public void analyzeInternal(ASTNode ast) throws 
SemanticException {
     queryState.setCommandType(toHiveOperation(type));
   }
 
-  private ScheduledQuery  
fillScheduledQuery(ScheduledQueryMaintenanceRequestType type, ScheduledQuery 
schqChanges)
-      throws SemanticException {
+  private ScheduledQuery  
fillScheduledQuery(ScheduledQueryMaintenanceRequestType type, ScheduledQuery 
schqChanges, boolean throwException)
+          throws SemanticException {
     if (type == ScheduledQueryMaintenanceRequestType.CREATE) {
       return composeOverlayObject(schqChanges, buildEmptySchq());
     } else {
@@ -101,8 +112,13 @@ private ScheduledQuery  
fillScheduledQuery(ScheduledQueryMaintenanceRequestType
         // clear the next execution time
         schqStored.setNextExecutionIsSet(false);
         return composeOverlayObject(schqChanges, schqStored);
+      } catch (NoSuchObjectException e) {
+        if (throwException) {
+          throw new 
SemanticException(ErrorMsg.INVALID_SCHEDULED_QUERY.format(schqChanges.getScheduleKey().getScheduleName()),
 e);
+        }
+        return null;
       } catch (TException e) {
-        throw new SemanticException("unable to get Scheduled query" + e);
+        throw new SemanticException(e.getMessage(), e);
       }
     }
   }
@@ -199,6 +215,8 @@ private void processScheduledQueryAstNode(ScheduledQuery 
schq, ASTNode node) thr
       int now = (int) (System.currentTimeMillis() / 1000);
       schq.setNextExecution(now);
       return;
+    case HiveParser.TOK_IFEXISTS:
+      return;
     default:
       throw new SemanticException("Unexpected token: " + node.getType());
     }
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java 
b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
index 4f7990f0b4e..6c5a3b73e66 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/schq/TestScheduledQueryStatements.java
@@ -17,17 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.schq;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.api.ScheduledQueryKey;
 import org.apache.hadoop.hive.metastore.model.MScheduledQuery;
 import org.apache.hadoop.hive.ql.DriverFactory;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
@@ -44,6 +40,8 @@
 
 import java.util.Optional;
 
+import static org.junit.Assert.*;
+
 public class TestScheduledQueryStatements {
 
   @ClassRule
@@ -56,6 +54,7 @@ public class TestScheduledQueryStatements {
   public static void beforeClass() throws Exception {
     
env_setup.getTestCtx().hiveConf.set("hive.security.authorization.scheduled.queries.supported",
 "true");
     env_setup.getTestCtx().hiveConf.setVar(ConfVars.USERS_IN_ADMIN_ROLE, 
System.getProperty("user.name"));
+    
env_setup.getTestCtx().hiveConf.setBoolVar(ConfVars.DROP_IGNORES_NON_EXISTENT, 
false);
 
     IDriver driver = createDriver();
     dropTables(driver);
@@ -203,6 +202,77 @@ public void testAlter() throws ParseException, Exception {
 
   }
 
+  @Test
+  public void testDrop() throws ParseException, Exception {
+    IDriver driver = createDriver();
+
+    driver.run("set role admin");
+    driver.run("create scheduled query drop1 cron '0 0 7 * * ? *' as select 1 
from tu");
+
+    try (CloseableObjectStore os = new 
CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
+      Optional<MScheduledQuery> sq = os.getMScheduledQuery(new 
ScheduledQueryKey("drop1", "hive"));
+      assertTrue(sq.isPresent());
+    }
+
+    driver.run("drop scheduled query drop1");
+
+    try (CloseableObjectStore os = new 
CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
+      Optional<MScheduledQuery> sq = os.getMScheduledQuery(new 
ScheduledQueryKey("drop1", "hive"));
+      assertFalse(sq.isPresent());
+    }
+  }
+
+  @Test
+  public void testDropIfExists() throws ParseException, Exception {
+    IDriver driver = createDriver();
+
+    driver.run("set role admin");
+    driver.run("create scheduled query drop2 cron '0 0 7 * * ? *' as select 1 
from tu");
+
+    try (CloseableObjectStore os = new 
CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
+      Optional<MScheduledQuery> sq = os.getMScheduledQuery(new 
ScheduledQueryKey("drop2", "hive"));
+      assertTrue(sq.isPresent());
+    }
+
+    driver.run("drop scheduled query if exists drop2");
+
+    try (CloseableObjectStore os = new 
CloseableObjectStore(env_setup.getTestCtx().hiveConf)) {
+      Optional<MScheduledQuery> sq = os.getMScheduledQuery(new 
ScheduledQueryKey("drop2", "hive"));
+      assertFalse(sq.isPresent());
+    }
+  }
+
+  @Test
+  public void testDropWithoutCreate() throws ParseException, Exception {
+    IDriver driver = createDriver();
+    driver.run("set role admin");
+
+    try {
+      driver.run("drop scheduled query drop3");
+      fail("Expected CommandProcessorException to be thrown when dropping a 
non existent scheduled query");
+    } catch (CommandProcessorException e) {
+      assertTrue(e.getMessage().contains("Scheduled query drop3 does not 
exist"));
+      assertEquals(ErrorMsg.INVALID_SCHEDULED_QUERY.getErrorCode(), 
e.getResponseCode());
+    }
+  }
+
+  @Test
+  public void testDropWithoutCreateWithIgnoreNonExistent() throws 
ParseException, Exception {
+    HiveConf conf = env_setup.getTestCtx().hiveConf;
+    conf.setBoolVar(ConfVars.DROP_IGNORES_NON_EXISTENT, true);
+    IDriver driver = createDriver();
+
+    driver.run("set role admin");
+    driver.run("drop scheduled query drop4");
+  }
+
+  @Test
+  public void testDropIfExistsWithoutCreate() throws ParseException, Exception 
{
+    IDriver driver = createDriver();
+    driver.run("set role admin");
+    driver.run("drop scheduled query if exists drop5");
+  }
+
   @Test
   public void testExecuteImmediate() throws ParseException, Exception {
     // use a different namespace because the schq executor might be able to

Reply via email to the mailing list.