This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit c4e7977f5ee1b740215eb486a074bc1326cff360
Author: Tim Armstrong <tarmstr...@cloudera.com>
AuthorDate: Mon Nov 23 12:14:33 2020 -0800

    IMPALA-10351,IMPALA-9812: enable mt_dop for DML by default
    
    This allows setting mt_dop for any query with any configuration.
    Before this patch, mt_dop was not supported for DML statements.
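    
    For example (illustrative only; the INSERT below mirrors the updated
    mt-dop-validation planner test), a DML statement can now be planned and
    run with a parallel plan simply by setting the query option:
    
        SET MT_DOP=4;
        INSERT INTO functional_parquet.alltypes PARTITION (year, month)
        SELECT * FROM functional_parquet.alltypessmall;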
    
    --unlock_mt_dop and --mt_dop_auto_fallback are now ignored.
    
    Testing:
    * Updated tests to reflect new behaviour.
    * Removed irrelevant tests for fallback/validation.
    * Ran exhaustive tests.
    
    Change-Id: I66331481260fe4b69d9e95b0200029b14d230ade
    Reviewed-on: http://gerrit.cloudera.org:8080/16775
    Reviewed-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
    Tested-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
---
 be/src/common/global-flags.cc                      |  9 +--
 be/src/util/backend-gflag-util.cc                  |  4 --
 bin/run-all-tests.sh                               |  3 +-
 common/thrift/BackendGflags.thrift                 |  4 +-
 .../java/org/apache/impala/common/RuntimeEnv.java  |  7 --
 .../java/org/apache/impala/planner/Planner.java    | 18 -----
 .../org/apache/impala/service/BackendConfig.java   |  8 ---
 .../org/apache/impala/planner/PlannerTest.java     | 16 ++---
 .../queries/PlannerTest/mt-dop-validation.test     | 80 ++++++++++++++++------
 .../queries/QueryTest/mt-dop-auto-fallback.test    | 31 ---------
 tests/custom_cluster/test_mt_dop.py                | 21 +-----
 11 files changed, 69 insertions(+), 132 deletions(-)

diff --git a/be/src/common/global-flags.cc b/be/src/common/global-flags.cc
index 339ff14..1b8c16f 100644
--- a/be/src/common/global-flags.cc
+++ b/be/src/common/global-flags.cc
@@ -290,13 +290,6 @@ DEFINE_double_hidden(invalidate_tables_fraction_on_memory_pressure, 0.1,
    "The fraction of tables to invalidate when CatalogdTableInvalidator considers the "
     "old GC generation to be almost full.");
 
-DEFINE_bool_hidden(unlock_mt_dop, false,
-    "(Experimental) If true, allow specifying mt_dop for all queries.");
-
-DEFINE_bool_hidden(mt_dop_auto_fallback, false,
-    "(Experimental) If true, fall back to non-mt_dop if mt_dop query option is set and "
-    "a query does not support it. Has no effect if --unlock_mt_dop is true.");
-
 DEFINE_bool_hidden(recursively_list_partitions, true,
     "If true, recursively list the content of partition directories.");
 
@@ -399,6 +392,7 @@ REMOVED_FLAG(llama_registration_timeout_secs);
 REMOVED_FLAG(llama_registration_wait_secs);
 REMOVED_FLAG(local_nodemanager_url);
 REMOVED_FLAG(max_free_io_buffers);
+REMOVED_FLAG(mt_dop_auto_fallback);
 REMOVED_FLAG(pull_incremental_statistics);
 REMOVED_FLAG(report_status_retry_interval_ms);
 REMOVED_FLAG(resource_broker_cnxn_attempts);
@@ -417,6 +411,7 @@ REMOVED_FLAG(staging_cgroup);
 REMOVED_FLAG(status_report_interval);
 REMOVED_FLAG(status_report_max_retries);
 REMOVED_FLAG(suppress_unknown_disk_id_warnings);
+REMOVED_FLAG(unlock_mt_dop);
 REMOVED_FLAG(use_krpc);
 REMOVED_FLAG(use_kudu_kinit);
 REMOVED_FLAG(use_statestore);
diff --git a/be/src/util/backend-gflag-util.cc b/be/src/util/backend-gflag-util.cc
index 3a2c470..b43c63e 100644
--- a/be/src/util/backend-gflag-util.cc
+++ b/be/src/util/backend-gflag-util.cc
@@ -66,8 +66,6 @@ DECLARE_int32(kudu_error_buffer_size);
 DECLARE_int32(hms_event_polling_interval_s);
 DECLARE_bool(enable_insert_events);
 DECLARE_string(authorization_factory_class);
-DECLARE_bool(unlock_mt_dop);
-DECLARE_bool(mt_dop_auto_fallback);
 DECLARE_string(ranger_service_type);
 DECLARE_string(ranger_app_id);
 DECLARE_string(authorization_provider);
@@ -153,8 +151,6 @@ Status GetThriftBackendGflags(JNIEnv* jni_env, jbyteArray* cfg_bytes) {
   cfg.__set_enable_insert_events(FLAGS_enable_insert_events);
   cfg.__set_impala_build_version(::GetDaemonBuildVersion());
   cfg.__set_authorization_factory_class(FLAGS_authorization_factory_class);
-  cfg.__set_unlock_mt_dop(FLAGS_unlock_mt_dop);
-  cfg.__set_mt_dop_auto_fallback(FLAGS_mt_dop_auto_fallback);
   cfg.__set_ranger_service_type(FLAGS_ranger_service_type);
   cfg.__set_ranger_app_id(FLAGS_ranger_app_id);
   cfg.__set_authorization_provider(FLAGS_authorization_provider);
diff --git a/bin/run-all-tests.sh b/bin/run-all-tests.sh
index 98fa122..329b866 100755
--- a/bin/run-all-tests.sh
+++ b/bin/run-all-tests.sh
@@ -169,11 +169,10 @@ TEST_RET_CODE=0
 
 # Helper function to start Impala cluster.
 start_impala_cluster() {
-  # TODO: IMPALA-9812: remove --unlock_mt_dop when it is no longer needed.
   run-step "Starting Impala cluster" start-impala-cluster.log \
       "${IMPALA_HOME}/bin/start-impala-cluster.py" \
       --log_dir="${IMPALA_EE_TEST_LOGS_DIR}" \
-      ${TEST_START_CLUSTER_ARGS} --impalad_args=--unlock_mt_dop=true
+      ${TEST_START_CLUSTER_ARGS}
 }
 
 run_ee_tests() {
diff --git a/common/thrift/BackendGflags.thrift b/common/thrift/BackendGflags.thrift
index 017ad18..89ce733 100644
--- a/common/thrift/BackendGflags.thrift
+++ b/common/thrift/BackendGflags.thrift
@@ -118,7 +118,7 @@ struct TBackendGflags {
 
   47: required string authorization_factory_class
 
-  48: required bool unlock_mt_dop
+  // REMOVED: 48: required bool unlock_mt_dop
 
   49: required string ranger_service_type
 
@@ -146,7 +146,7 @@ struct TBackendGflags {
 
   61: required string min_privilege_set_for_show_stmts
 
-  62: required bool mt_dop_auto_fallback
+  // REMOVED: 62: required bool mt_dop_auto_fallback
 
   63: required i32 num_expected_executors
 
diff --git a/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java b/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
index 4342cf0..2b216f8 100644
--- a/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
+++ b/fe/src/main/java/org/apache/impala/common/RuntimeEnv.java
@@ -30,10 +30,6 @@ public class RuntimeEnv {
   // Indicates whether this is an environment for testing.
   private boolean isTestEnv_;
 
-  // Whether we should do the same mt_dop validation in frontend tests as in the Impala
-  // service.
-  private boolean enableMtDopValidation_;
-
   public RuntimeEnv() {
     reset();
   }
@@ -44,13 +40,10 @@ public class RuntimeEnv {
   public void reset() {
     numCores_ = Runtime.getRuntime().availableProcessors();
     isTestEnv_ = false;
-    enableMtDopValidation_ = false;
   }
 
   public int getNumCores() { return numCores_; }
   public void setNumCores(int numCores) { this.numCores_ = numCores; }
   public void setTestEnv(boolean v) { isTestEnv_ = v; }
   public boolean isTestEnv() { return isTestEnv_; }
-  public boolean isMtDopValidationEnabled() { return enableMtDopValidation_; }
-  public void setEnableMtDopValidation(boolean v) { enableMtDopValidation_ = v; }
 }
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index c02cc59..e62f6fe 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -125,24 +125,6 @@ public class Planner {
     invertJoins(singleNodePlan, ctx_.isSingleNodeExec());
     singleNodePlan = useNljForSingularRowBuilds(singleNodePlan, ctx_.getRootAnalyzer());
 
-    // Parallel plans are not supported by default for plans with table sinks because
-    // of issues like IMPALA-8125. We only allow such plans if --unlock_mt_dop=true is
-    // specified.
-    if (useParallelPlan()
-        && (!RuntimeEnv.INSTANCE.isTestEnv()
-               || RuntimeEnv.INSTANCE.isMtDopValidationEnabled())
-        && !BackendConfig.INSTANCE.isMtDopUnlocked()
-        && ctx_.hasTableSink()) {
-      if (BackendConfig.INSTANCE.mtDopAutoFallback()) {
-        // Fall back to non-dop mode. This assumes that the mt_dop value is only used
-        // in the distributed planning process, which should be generally true as long
-        // as the value isn't cached in any plan nodes.
-        ctx_.getQueryOptions().setMt_dop(0);
-      } else {
-        throw new NotImplementedException("MT_DOP not supported for DML statements.");
-      }
-    }
-
     singleNodePlanner.validatePlan(singleNodePlan);
 
     if (ctx_.isSingleNodeExec()) {
diff --git a/fe/src/main/java/org/apache/impala/service/BackendConfig.java b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
index c1a5264..50ccbce 100644
--- a/fe/src/main/java/org/apache/impala/service/BackendConfig.java
+++ b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
@@ -140,14 +140,6 @@ public class BackendConfig {
     return "".equals(val) ? null : val;
   }
 
-  public boolean isMtDopUnlocked() {
-    return backendCfg_.unlock_mt_dop;
-  }
-
-  public boolean mtDopAutoFallback() {
-    return backendCfg_.mt_dop_auto_fallback;
-  }
-
   public boolean recursivelyListPartitions() {
     return backendCfg_.recursively_list_partitions;
   }
diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
index 78da229..96d3b19 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTest.java
@@ -28,7 +28,6 @@ import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.HBaseColumn;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.ImpalaException;
-import org.apache.impala.common.RuntimeEnv;
 import org.apache.impala.datagenerator.HBaseTestDataRegionAssignment;
 import org.apache.impala.service.Frontend.PlanCtx;
 import org.apache.impala.testutil.TestUtils;
@@ -41,7 +40,6 @@ import org.apache.impala.thrift.TQueryCtx;
 import org.apache.impala.thrift.TQueryOptions;
 import org.apache.impala.thrift.TRuntimeFilterMode;
 import org.junit.Assert;
-import org.junit.Assume;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -682,20 +680,14 @@ public class PlannerTest extends PlannerTestBase {
 
   @Test
   public void testMtDopValidation() {
-    // Tests that queries supported with mt_dop > 0 produce a parallel plan, or
-    // throw a NotImplementedException otherwise (e.g. plan has a distributed join).
+    // Tests that queries planned with mt_dop > 0 produce a parallel plan.
+    // Since IMPALA-9812 was fixed all plans are supported. Previously some plans
+    // were rejected.
     TQueryOptions options = defaultQueryOptions();
     options.setMt_dop(3);
     options.setDisable_hdfs_num_rows_estimate(true);
     options.setExplain_level(TExplainLevel.EXTENDED);
-    try {
-      // Temporarily unset the test env such that unsupported queries with mt_dop > 0
-      // throw an exception. Those are otherwise allowed for testing parallel plans.
-      RuntimeEnv.INSTANCE.setEnableMtDopValidation(true);
-      runPlannerTestFile("mt-dop-validation", options);
-    } finally {
-      RuntimeEnv.INSTANCE.setEnableMtDopValidation(false);
-    }
+    runPlannerTestFile("mt-dop-validation", options);
   }
 
   @Test
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test b/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
index 6f7695a..e02c5f1 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/mt-dop-validation.test
@@ -229,7 +229,7 @@ Per-Instance Resources: mem-estimate=26.00MB mem-reservation=8.00KB thread-reser
    tuple-ids=0 row-size=4B cardinality=unavailable
    in pipelines: 00(GETNEXT)
 ====
-# Insert not allowed.
+# Insert is allowed.
 insert into functional_parquet.alltypes partition(year,month)
 select * from functional_parquet.alltypessmall
 ---- PLAN
@@ -257,9 +257,31 @@ WRITE TO HDFS [functional_parquet.alltypes, OVERWRITE=false, PARTITION-KEYS=(yea
    tuple-ids=0 row-size=80B cardinality=unavailable
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
-NotImplementedException: MT_DOP not supported for DML statements.
+F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=4
+|  Per-Instance Resources: mem-estimate=1.01GB mem-reservation=12.09MB thread-reservation=1
+WRITE TO HDFS [functional_parquet.alltypes, OVERWRITE=false, PARTITION-KEYS=(year,month)]
+|  partitions=4
+|  output exprs: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col, year, month
+|  mem-estimate=1.00GB mem-reservation=0B thread-reservation=0
+|
+01:SORT
+|  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
+|  tuple-ids=2 row-size=80B cardinality=unavailable
+|  in pipelines: 01(GETNEXT), 00(OPEN)
+|
+00:SCAN HDFS [functional_parquet.alltypessmall, RANDOM]
+   HDFS partitions=4/4 files=4 size=14.76KB
+   stored statistics:
+     table: rows=unavailable size=unavailable
+     partitions: 0/4 rows=unavailable
+     columns missing stats: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col
+   extrapolated-rows=disabled max-scan-range-rows=unavailable
+   mem-estimate=16.00MB mem-reservation=88.00KB thread-reservation=0
+   tuple-ids=0 row-size=80B cardinality=unavailable
+   in pipelines: 00(GETNEXT)
 ====
-# CTAS not allowed.
+# CTAS is allowed.
 create table ctas_mt_dop_test as select * from functional_parquet.alltypes
 ---- PLAN
 F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
@@ -270,7 +292,7 @@ WRITE TO HDFS [default.ctas_mt_dop_test, OVERWRITE=false]
 |  mem-estimate=100.00KB mem-reservation=0B thread-reservation=0
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   HDFS partitions=24/24 files=24 size=203.33KB
+   HDFS partitions=24/24 files=24 size=202.34KB
    stored statistics:
      table: rows=unavailable size=unavailable
      partitions: 0/24 rows=unavailable
@@ -280,7 +302,23 @@ WRITE TO HDFS [default.ctas_mt_dop_test, OVERWRITE=false]
    tuple-ids=0 row-size=80B cardinality=unavailable
    in pipelines: 00(GETNEXT)
 ---- PARALLELPLANS
-NotImplementedException: MT_DOP not supported for DML statements.
+F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=9
+|  Per-Instance Resources: mem-estimate=16.10MB mem-reservation=88.00KB thread-reservation=1
+WRITE TO HDFS [default.ctas_mt_dop_test, OVERWRITE=false]
+|  partitions=1
+|  output exprs: functional_parquet.alltypes.id, functional_parquet.alltypes.bool_col, functional_parquet.alltypes.tinyint_col, functional_parquet.alltypes.smallint_col, functional_parquet.alltypes.int_col, functional_parquet.alltypes.bigint_col, functional_parquet.alltypes.float_col, functional_parquet.alltypes.double_col, functional_parquet.alltypes.date_string_col, functional_parquet.alltypes.string_col, functional_parquet.alltypes.timestamp_col, functional_parquet.alltypes.year, func [...]
+|  mem-estimate=100.00KB mem-reservation=0B thread-reservation=0
+|
+00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
+   HDFS partitions=24/24 files=24 size=202.34KB
+   stored statistics:
+     table: rows=unavailable size=unavailable
+     partitions: 0/24 rows=unavailable
+     columns missing stats: id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col, timestamp_col
+   extrapolated-rows=disabled max-scan-range-rows=unavailable
+   mem-estimate=16.00MB mem-reservation=88.00KB thread-reservation=0
+   tuple-ids=0 row-size=80B cardinality=unavailable
+   in pipelines: 00(GETNEXT)
 ====
 # Single-table scan/filter/agg/topn should work.
 select count(int_col) cnt from functional_parquet.alltypes
@@ -309,7 +347,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   HDFS partitions=24/24 files=24 size=203.33KB
+   HDFS partitions=24/24 files=24 size=202.34KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -365,7 +403,7 @@ Per-Instance Resources: mem-estimate=144.00MB mem-reservation=34.02MB thread-res
 |  in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   HDFS partitions=24/24 files=24 size=203.33KB
+   HDFS partitions=24/24 files=24 size=202.34KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -405,7 +443,7 @@ PLAN-ROOT SINK
 |  in pipelines: 01(GETNEXT), 00(OPEN)
 |
 00:SCAN HDFS [functional_parquet.alltypes]
-   HDFS partitions=24/24 files=24 size=203.33KB
+   HDFS partitions=24/24 files=24 size=202.34KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -454,7 +492,7 @@ Per-Instance Resources: mem-estimate=10.11MB mem-reservation=10.00MB thread-rese
 F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=9
 Per-Instance Resources: mem-estimate=16.00MB mem-reservation=16.00KB thread-reservation=1
 00:SCAN HDFS [functional_parquet.alltypes, RANDOM]
-   HDFS partitions=24/24 files=24 size=203.33KB
+   HDFS partitions=24/24 files=24 size=202.34KB
    predicates: id < CAST(10 AS INT)
    stored statistics:
      table: rows=unavailable size=unavailable
@@ -523,14 +561,14 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   HDFS partitions=1/1 files=4 size=289.02MB
+   HDFS partitions=1/1 files=4 size=289.05MB
    predicates: c_custkey < CAST(10 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey < CAST(5 AS BIGINT)
    predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    stored statistics:
-     table: rows=150.00K size=289.02MB
+     table: rows=150.00K size=289.05MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=50.42K
+   extrapolated-rows=disabled max-scan-range-rows=50.72K
    parquet statistics predicates: c_custkey < CAST(10 AS BIGINT)
    parquet statistics predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet statistics predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
@@ -599,14 +637,14 @@ Per-Instance Resources: mem-estimate=104.00MB mem-reservation=104.00MB thread-re
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c, RANDOM]
-   HDFS partitions=1/1 files=4 size=289.02MB
+   HDFS partitions=1/1 files=4 size=289.05MB
    predicates: c_custkey < CAST(10 AS BIGINT), !empty(c.c_orders)
    predicates on o: !empty(o.o_lineitems), o_orderkey < CAST(5 AS BIGINT)
    predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
    stored statistics:
-     table: rows=150.00K size=289.02MB
+     table: rows=150.00K size=289.05MB
      columns missing stats: c_orders
-   extrapolated-rows=disabled max-scan-range-rows=50.42K
+   extrapolated-rows=disabled max-scan-range-rows=50.72K
    parquet statistics predicates: c_custkey < CAST(10 AS BIGINT)
    parquet statistics predicates on o: o_orderkey < CAST(5 AS BIGINT)
    parquet statistics predicates on o_lineitems: l_linenumber < CAST(3 AS INT)
@@ -664,13 +702,13 @@ PLAN-ROOT SINK
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c]
-   HDFS partitions=1/1 files=4 size=289.02MB
+   HDFS partitions=1/1 files=4 size=289.05MB
    predicates: !empty(c.c_orders), !empty(c.c_orders)
    predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    stored statistics:
-     table: rows=150.00K size=289.02MB
+     table: rows=150.00K size=289.05MB
      columns missing stats: c_orders, c_orders
-   extrapolated-rows=disabled max-scan-range-rows=50.42K
+   extrapolated-rows=disabled max-scan-range-rows=50.72K
    parquet statistics predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    mem-estimate=88.00MB mem-reservation=16.00MB thread-reservation=0
@@ -726,13 +764,13 @@ Per-Instance Resources: mem-estimate=89.94MB mem-reservation=17.94MB thread-rese
 |     in pipelines: 00(GETNEXT)
 |
 00:SCAN HDFS [tpch_nested_parquet.customer c, RANDOM]
-   HDFS partitions=1/1 files=4 size=289.02MB
+   HDFS partitions=1/1 files=4 size=289.05MB
    predicates: !empty(c.c_orders), !empty(c.c_orders)
    predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    stored statistics:
-     table: rows=150.00K size=289.02MB
+     table: rows=150.00K size=289.05MB
      columns missing stats: c_orders, c_orders
-   extrapolated-rows=disabled max-scan-range-rows=50.42K
+   extrapolated-rows=disabled max-scan-range-rows=50.72K
    parquet statistics predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    parquet dictionary predicates on o1: o1.o_orderkey < CAST(5 AS BIGINT)
    mem-estimate=88.00MB mem-reservation=16.00MB thread-reservation=0
diff --git a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-auto-fallback.test b/testdata/workloads/functional-query/queries/QueryTest/mt-dop-auto-fallback.test
deleted file mode 100644
index 6b601ba..0000000
--- a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-auto-fallback.test
+++ /dev/null
@@ -1,31 +0,0 @@
-====
----- QUERY
-# IMPALA-9099: mt_dop is now allowed for joins, so we should run with mt_dop.
-select min(l_orderkey), min(p_name)
-from tpch.lineitem join tpch.part on l_partkey = p_partkey;
----- RESULTS
-1,'almond antique blue royal burnished'
----- TYPES
-BIGINT,STRING
----- RUNTIME_PROFILE
-row_regex:.*Query Options \(set by configuration and planner\):.*MT_DOP=4.*
-====
----- QUERY
-# IMPALA-9099: mt_dop is now allowed for joins, so we should run with mt_dop.
-select min(l_orderkey), min(p_name)
-from tpch_kudu.lineitem join tpch_kudu.part on l_partkey = p_partkey;
----- RESULTS
-1,'almond antique blue royal burnished'
----- TYPES
-BIGINT,STRING
----- RUNTIME_PROFILE
-row_regex:.*Query Options \(set by configuration and planner\):.*MT_DOP=4.*
-====
----- QUERY
-create table tmp as
-select * from functional.alltypes
----- RUNTIME_PROFILE
-row_regex:.*Query Options \(set by configuration and planner\):.*MT_DOP=0.*
-row_regex:.*All 3 execution backends \(3 fragment instances\) started.*
-row_regex:.*NumScannerThreadsStarted.*
-====
diff --git a/tests/custom_cluster/test_mt_dop.py b/tests/custom_cluster/test_mt_dop.py
index fd48e37..098df28 100644
--- a/tests/custom_cluster/test_mt_dop.py
+++ b/tests/custom_cluster/test_mt_dop.py
@@ -46,26 +46,7 @@ class TestMtDopFlags(CustomClusterTestSuite):
   def add_test_dimensions(cls):
     super(TestMtDopFlags, cls).add_test_dimensions()
 
-  @pytest.mark.execute_serially
-  @CustomClusterTestSuite.with_args(impalad_args="--mt_dop_auto_fallback=true")
-  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
-  def test_mt_dop_fallback(self, vector, unique_database):
-    """Test inserts fall back to non-mt_dop correctly.
-    TODO: IMPALA-8966: remove this test when mt_dop is enabled across the board.
-    """
-    vector = deepcopy(vector)
-    vector.get_value('exec_option')['mt_dop'] = 4
-    # Targeted test case that verifies that the fallback actually switches to the
-    # non-mt-dop plans.
-    self.run_test_case('QueryTest/mt-dop-auto-fallback', vector, use_db=unique_database)
-
-    # Check that the join and insert plans work as expected.
-    self.run_test_case('QueryTest/joins', vector, use_db="functional_parquet")
-    self.run_test_case('QueryTest/insert', vector, unique_database,
-        test_file_vars={'$ORIGINAL_DB': CustomClusterTestSuite
-        .get_db_name_from_format(vector.get_value('table_format'))})
-
-  @CustomClusterTestSuite.with_args(impalad_args="--unlock_mt_dop=true", cluster_size=1)
+  @CustomClusterTestSuite.with_args(cluster_size=1)
   def test_mt_dop_runtime_filters_one_node(self, vector):
     """Runtime filter tests, which assume 3 fragment instances, can also be run on a single
     node cluster to test multiple filter sources/destinations per backend."""
