This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 2c476753d81 branch-2.1: [fix](cache) fix sql cache throw npe in cloud mode #47221 (#47267)
2c476753d81 is described below

commit 2c476753d8126a96d42eca31a80e21d32d60bc66
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Fri Jan 24 17:30:37 2025 +0800

    branch-2.1: [fix](cache) fix sql cache throw npe in cloud mode #47221 (#47267)
    
    Cherry-picked from #47221
    
    Co-authored-by: 924060929 <lanhuaj...@selectdb.com>
---
 .../doris/common/NereidsSqlCacheManager.java       | 53 +++++++-------
 .../org/apache/doris/qe/cache/CacheAnalyzer.java   | 84 +++++++++++-----------
 2 files changed, 70 insertions(+), 67 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
index 421cf575304..db9fd3fb666 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
@@ -224,37 +224,36 @@ public class NereidsSqlCacheManager {
     private Optional<LogicalSqlCache> tryParseSql(
             ConnectContext connectContext, String key, SqlCacheContext sqlCacheContext,
             UserIdentity currentUserIdentity, boolean checkUserVariable) {
-        Env env = connectContext.getEnv();
-
-        if (!tryLockTables(connectContext, env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
+        try {
+            Env env = connectContext.getEnv();
 
-        // check table and view and their columns authority
-        if (privilegeChanged(connectContext, env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
-        if (tablesOrDataChanged(env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
-        if (viewsChanged(env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
+            if (!tryLockTables(connectContext, env, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
 
-        LogicalEmptyRelation whateverPlan = new LogicalEmptyRelation(new RelationId(0), ImmutableList.of());
-        if (nondeterministicFunctionChanged(whateverPlan, connectContext, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
+            // check table and view and their columns authority
+            if (privilegeChanged(connectContext, env, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
+            if (tablesOrDataChanged(env, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
+            if (viewsChanged(env, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
 
-        // table structure and data not changed, now check policy
-        if (rowPoliciesChanged(currentUserIdentity, env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
-        if (dataMaskPoliciesChanged(currentUserIdentity, env, sqlCacheContext)) {
-            return invalidateCache(key);
-        }
+            LogicalEmptyRelation whateverPlan = new LogicalEmptyRelation(new RelationId(0), ImmutableList.of());
+            if (nondeterministicFunctionChanged(whateverPlan, connectContext, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
 
-        try {
+            // table structure and data not changed, now check policy
+            if (rowPoliciesChanged(currentUserIdentity, env, sqlCacheContext)) 
{
+                return invalidateCache(key);
+            }
+            if (dataMaskPoliciesChanged(currentUserIdentity, env, sqlCacheContext)) {
+                return invalidateCache(key);
+            }
             Optional<ResultSet> resultSetInFe = sqlCacheContext.getResultSetInFe();
 
             List<Variable> currentVariables = ImmutableList.of();
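[Editor's note] The shape of the change above in NereidsSqlCacheManager.tryParseSql is to move all of the cache-validation checks inside the existing try block, so that any unexpected Throwable (such as the NPE seen in cloud mode) falls through to invalidating the cache entry rather than escaping to the caller. A minimal, self-contained sketch of that pattern follows; the names (DefensiveSqlCache, CachedPlan, stillValid, invalidate) are hypothetical illustrations, not Doris APIs.

    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    public class DefensiveSqlCache {
        // Hypothetical stand-in for Doris's LogicalSqlCache entry.
        static final class CachedPlan {
        }

        private final Map<String, CachedPlan> cache = new ConcurrentHashMap<>();

        // All validation steps run inside the try block, so a NullPointerException
        // (or any other Throwable) thrown while checking privileges, table versions,
        // views or policies invalidates the entry instead of escaping to the caller.
        Optional<CachedPlan> tryUseCache(String key) {
            try {
                CachedPlan cached = cache.get(key);
                if (cached == null || !stillValid(cached)) {
                    return invalidate(key);
                }
                return Optional.of(cached);
            } catch (Throwable t) {
                return invalidate(key);
            }
        }

        private Optional<CachedPlan> invalidate(String key) {
            cache.remove(key);
            return Optional.empty();
        }

        private boolean stillValid(CachedPlan cached) {
            // Stand-in for the real checks: privileges, table/data versions, views,
            // row policies, data-mask policies, nondeterministic functions.
            return true;
        }
    }

A cache miss and a validation failure end the same way here: the entry is dropped and the query planner proceeds as if no cache existed, which is exactly the fallback the patch relies on.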
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
index 5dd31404ad1..29a570f0868 100644
index 5dd31404ad1..29a570f0868 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java
@@ -471,54 +471,58 @@ public class CacheAnalyzer {
     }
 
     private List<CacheTable> buildCacheTableList() {
-        //Check the last version time of the table
-        MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
-        long olapScanNodeSize = 0;
-        long hiveScanNodeSize = 0;
-        for (ScanNode scanNode : scanNodes) {
-            if (scanNode instanceof OlapScanNode) {
-                olapScanNodeSize++;
-            } else if (scanNode instanceof HiveScanNode) {
-                hiveScanNodeSize++;
+        try {
+            //Check the last version time of the table
+            MetricRepo.COUNTER_QUERY_TABLE.increase(1L);
+            long olapScanNodeSize = 0;
+            long hiveScanNodeSize = 0;
+            for (ScanNode scanNode : scanNodes) {
+                if (scanNode instanceof OlapScanNode) {
+                    olapScanNodeSize++;
+                } else if (scanNode instanceof HiveScanNode) {
+                    hiveScanNodeSize++;
+                }
             }
-        }
-        if (olapScanNodeSize > 0) {
-            MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
-        }
-        if (hiveScanNodeSize > 0) {
-            MetricRepo.COUNTER_QUERY_HIVE_TABLE.increase(1L);
-        }
-
-        if (!(olapScanNodeSize == scanNodes.size() || hiveScanNodeSize == scanNodes.size())) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("only support olap/hive table with non-federated 
query, other types are not supported now, "
-                        + "queryId {}", DebugUtil.printId(queryId));
+            if (olapScanNodeSize > 0) {
+                MetricRepo.COUNTER_QUERY_OLAP_TABLE.increase(1L);
+            }
+            if (hiveScanNodeSize > 0) {
+                MetricRepo.COUNTER_QUERY_HIVE_TABLE.increase(1L);
             }
-            return Collections.emptyList();
-        }
 
-        List<CacheTable> tblTimeList = Lists.newArrayList();
-        for (int i = 0; i < scanNodes.size(); i++) {
-            ScanNode node = scanNodes.get(i);
-            if (enablePartitionCache()
-                    && (node instanceof OlapScanNode)
-                    && ((OlapScanNode) node).getSelectedPartitionNum() > 1
-                    && selectStmt != null
-                    && selectStmt.hasGroupByClause()) {
+            if (!(olapScanNodeSize == scanNodes.size() || hiveScanNodeSize == scanNodes.size())) {
                 if (LOG.isDebugEnabled()) {
-                    LOG.debug("more than one partition scanned when qeury has 
agg, "
-                                    + "partition cache cannot use, queryid {}",
-                            DebugUtil.printId(queryId));
+                    LOG.debug("only support olap/hive table with non-federated 
query, "
+                            + "other types are not supported now, queryId {}", 
DebugUtil.printId(queryId));
                 }
                 return Collections.emptyList();
             }
-            CacheTable cTable = node instanceof OlapScanNode
-                    ? buildCacheTableForOlapScanNode((OlapScanNode) node)
-                    : buildCacheTableForHiveScanNode((HiveScanNode) node);
-            tblTimeList.add(cTable);
+
+            List<CacheTable> tblTimeList = Lists.newArrayList();
+            for (int i = 0; i < scanNodes.size(); i++) {
+                ScanNode node = scanNodes.get(i);
+                if (enablePartitionCache()
+                        && (node instanceof OlapScanNode)
+                        && ((OlapScanNode) node).getSelectedPartitionNum() > 1
+                        && selectStmt != null
+                        && selectStmt.hasGroupByClause()) {
+                    if (LOG.isDebugEnabled()) {
+                        LOG.debug("more than one partition scanned when qeury 
has agg, "
+                                        + "partition cache cannot use, queryid 
{}",
+                                DebugUtil.printId(queryId));
+                    }
+                    return Collections.emptyList();
+                }
+                CacheTable cTable = node instanceof OlapScanNode
+                        ? buildCacheTableForOlapScanNode((OlapScanNode) node)
+                        : buildCacheTableForHiveScanNode((HiveScanNode) node);
+                tblTimeList.add(cTable);
+            }
+            Collections.sort(tblTimeList);
+            return tblTimeList;
+        } catch (Throwable t) {
+            return new ArrayList<>();
         }
-        Collections.sort(tblTimeList);
-        return tblTimeList;
     }
 
     public InternalService.PFetchCacheResult getCacheData() throws UserException {

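[Editor's note] The CacheAnalyzer.buildCacheTableList change follows the same idea: the whole scan-node walk is wrapped in try/catch, and any Throwable yields an empty list, so the caller simply skips SQL caching for that query instead of surfacing the NPE. A rough sketch of the fallback, using simplified hypothetical types (CacheTableCollector, ScanNode, CacheTable here are stand-ins, not the real Doris classes):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class CacheTableCollector {
        // Simplified stand-in for Doris's CacheTable; sorted by latest update time.
        static final class CacheTable implements Comparable<CacheTable> {
            final long latestTime;

            CacheTable(long latestTime) {
                this.latestTime = latestTime;
            }

            @Override
            public int compareTo(CacheTable other) {
                return Long.compare(other.latestTime, this.latestTime);
            }
        }

        // Simplified stand-in for a scan node; the conversion may throw where
        // some metadata is unavailable, as in the cloud-mode NPE this patch fixes.
        interface ScanNode {
            CacheTable toCacheTable();
        }

        List<CacheTable> buildCacheTableList(List<ScanNode> scanNodes) {
            try {
                List<CacheTable> tables = new ArrayList<>();
                for (ScanNode node : scanNodes) {
                    tables.add(node.toCacheTable());
                }
                Collections.sort(tables);
                return tables;
            } catch (Throwable t) {
                // An empty list disables the cache path for this query;
                // the query itself still executes normally.
                return new ArrayList<>();
            }
        }
    }

Returning an empty list rather than rethrowing keeps caching strictly best-effort: a failure in cache bookkeeping can degrade performance but can no longer fail the query.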

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
