This is an automated email from the ASF dual-hosted git repository.

michaelsmith pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit f0b9340983ea65a36c575f04db893881ee48609e
Author: Riza Suminto <[email protected]>
AuthorDate: Tue Apr 4 17:44:41 2023 -0700

    IMPALA-12041: Select first executor group if query not auto-scalable
    
    In a multiple executor groups setup, some trivial queries like
    "select 1;" fail admission with a "No mapping found for request" error
    message. This patch fixes a bug where the Frontend does not set the
    executor group name prefix when a query is not auto-scalable. Even for
    a trivial query run, the correct executor group name prefix is still
    needed for the backend to correctly resolve the target pool.
    
    Testing:
    - Pass test_executor_groups.py
    
    Change-Id: I89497c8f67bfd176c2b60fa1b70fe53f905bbab0
    Reviewed-on: http://gerrit.cloudera.org:8080/19691
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 .../java/org/apache/impala/service/Frontend.java   | 62 ++++++++++++++--------
 tests/custom_cluster/test_executor_groups.py       | 30 ++++++++++-
 2 files changed, 67 insertions(+), 25 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java 
b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 48f089a22..01e568149 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -2085,19 +2085,6 @@ public class Frontend {
         }
       }
 
-      // If it is for a single node plan, enable_replan is disabled, or it is 
not a query
-      // that can be auto scaled, return the 1st plan generated.
-      if (queryOptions.num_nodes == 1) {
-        reason = "the number of nodes is 1";
-        break;
-      } else if (!enable_replan) {
-        reason = "query option 'enable_replan' is false";
-        break;
-      } else if (!Frontend.canStmtBeAutoScaled(req.stmt_type)) {
-        reason = "query is not auto-scalable";
-        break;
-      }
-
       // Counters about this group set.
       int available_cores = expectedTotalCores(group_set);
       String profileName = "Executor group " + (i + 1);
@@ -2110,6 +2097,31 @@ public class Frontend {
           new TCounter(MEMORY_MAX, TUnit.BYTES,
               LongMath.saturatedMultiply(
                   expectedNumExecutor(group_set), 
group_set.getMax_mem_limit())));
+      if (ProcessingCost.isComputeCost(queryOptions)) {
+        addCounter(groupSetProfile, new TCounter(CPU_MAX, TUnit.UNIT, 
available_cores));
+      }
+
+      // If it is for a single node plan, enable_replan is disabled, or it is 
not a query
+      // that can be auto scaled, return the 1st plan generated.
+      boolean notScalable = false;
+      if (queryOptions.num_nodes == 1) {
+        reason = "the number of nodes is 1";
+        notScalable = true;
+      } else if (!enable_replan) {
+        reason = "query option ENABLE_REPLAN=false";
+        notScalable = true;
+      } else if (!Frontend.canStmtBeAutoScaled(req.stmt_type)) {
+        reason = "query is not auto-scalable";
+        notScalable = true;
+      }
+
+      if (notScalable) {
+        setGroupNamePrefix(default_executor_group, req, group_set);
+        addInfoString(
+            groupSetProfile, VERDICT, "Assign to first group because " + 
reason);
+        FrontendProfile.getCurrent().addChildrenProfile(groupSetProfile);
+        break;
+      }
 
       // Find out the per host memory estimated from two possible sources.
       long per_host_mem_estimate = -1;
@@ -2140,7 +2152,6 @@ public class Frontend {
                 cores_requirement / 
BackendConfig.INSTANCE.getQueryCpuCountDivisor()));
         cpuReqSatisfied = scaled_cores_requirement <= available_cores;
 
-        addCounter(groupSetProfile, new TCounter(CPU_MAX, TUnit.UNIT, 
available_cores));
         addCounter(
             groupSetProfile, new TCounter(CPU_ASK, TUnit.UNIT, 
scaled_cores_requirement));
         addCounter(groupSetProfile,
@@ -2204,15 +2215,7 @@ public class Frontend {
       FrontendProfile.getCurrent().addChildrenProfile(groupSetProfile);
 
       if (matchFound) {
-        // Set the group name prefix in both the returned query options and
-        // the query context for non default group setup.
-        if (!default_executor_group) {
-          String namePrefix = group_set.getExec_group_name_prefix();
-          req.query_options.setRequest_pool(namePrefix);
-          if (req.query_exec_request != null) {
-            req.query_exec_request.query_ctx.setRequest_pool(namePrefix);
-          }
-        }
+        setGroupNamePrefix(default_executor_group, req, group_set);
         break;
       }
 
@@ -2263,6 +2266,19 @@ public class Frontend {
     return req;
   }
 
+  private static void setGroupNamePrefix(
+      boolean default_executor_group, TExecRequest req, TExecutorGroupSet 
group_set) {
+    // Set the group name prefix in both the returned query options and
+    // the query context for non default group setup.
+    if (!default_executor_group) {
+      String namePrefix = group_set.getExec_group_name_prefix();
+      req.query_options.setRequest_pool(namePrefix);
+      if (req.query_exec_request != null) {
+        req.query_exec_request.query_ctx.setRequest_pool(namePrefix);
+      }
+    }
+  }
+
   private static TRuntimeProfileNode createTRuntimeProfileNode(
       String childrenProfileName) {
     return new TRuntimeProfileNode(childrenProfileName,
diff --git a/tests/custom_cluster/test_executor_groups.py 
b/tests/custom_cluster/test_executor_groups.py
index 739de16c7..0cdbce478 100644
--- a/tests/custom_cluster/test_executor_groups.py
+++ b/tests/custom_cluster/test_executor_groups.py
@@ -928,12 +928,38 @@ class TestExecutorGroups(CustomClusterTestSuite):
         ["Executor Group: root.small-group", "ExecutorGroupsConsidered: 2",
           "Verdict: Match", "CpuAsk: 4", "CpuAskUnbounded: 1"])
 
+    # ENABLE_REPLAN=false should force query to run in tiny group.
+    self.execute_query_expect_success(self.client, "SET ENABLE_REPLAN=false;")
+    self._run_query_and_verify_profile(CPU_TEST_QUERY, CPU_DOP_OPTIONS,
+        ["Executor Group: root.tiny-group", "ExecutorGroupsConsidered: 1",
+         "Verdict: Assign to first group because query option 
ENABLE_REPLAN=false"])
+    self.execute_query_expect_success(self.client, "SET ENABLE_REPLAN='';")
+
+    # Trivial query should be assigned to tiny group by Frontend.
+    # Backend may decide to run it in coordinator only.
+    self._run_query_and_verify_profile("SELECT 1", CPU_DOP_OPTIONS,
+        ["Executor Group: empty group (using coordinator only)",
+         "ExecutorGroupsConsidered: 1",
+         "Verdict: Assign to first group because the number of nodes is 1"])
+
+    # CREATE/DROP database should work and assigned to tiny group.
+    self._run_query_and_verify_profile(
+        "CREATE DATABASE test_non_scalable_query;", CPU_DOP_OPTIONS,
+        ["ExecutorGroupsConsidered: 1",
+         "Verdict: Assign to first group because query is not auto-scalable"],
+        ["Executor Group:"])
+    self._run_query_and_verify_profile(
+        "DROP DATABASE test_non_scalable_query;", CPU_DOP_OPTIONS,
+        ["ExecutorGroupsConsidered: 1",
+         "Verdict: Assign to first group because query is not auto-scalable"],
+        ["Executor Group:"])
+
     # Check resource pools on the Web queries site and admission site
     self._verify_query_num_for_resource_pool("root.small", 2)
-    self._verify_query_num_for_resource_pool("root.tiny", 1)
+    self._verify_query_num_for_resource_pool("root.tiny", 3)
     self._verify_query_num_for_resource_pool("root.large", 2)
     self._verify_total_admitted_queries("root.small", 2)
-    self._verify_total_admitted_queries("root.tiny", 1)
+    self._verify_total_admitted_queries("root.tiny", 3)
     self._verify_total_admitted_queries("root.large", 2)
     self.client.close()
 

Reply via email to