This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new fcf5fba0402 branch-3.0: [fix](file cache) Fix BlockFileCache::get_stats #50584 (#50630)
fcf5fba0402 is described below

commit fcf5fba0402297adba42fe84bb89503dddda6d4d
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Wed May 7 10:10:48 2025 +0800

    branch-3.0: [fix](file cache) Fix BlockFileCache::get_stats #50584 (#50630)
    
    Cherry-picked from #50584
    
    Co-authored-by: Wen Zhenghu <[email protected]>
---
 be/src/io/cache/block_file_cache.cpp | 6 +++---
 be/src/olap/options.cpp              | 4 ++++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/be/src/io/cache/block_file_cache.cpp b/be/src/io/cache/block_file_cache.cpp
index d38e9670f2f..fd88ca74ae5 100644
--- a/be/src/io/cache/block_file_cache.cpp
+++ b/be/src/io/cache/block_file_cache.cpp
@@ -2130,7 +2130,7 @@ std::map<std::string, double> BlockFileCache::get_stats() {
     stats["hits_ratio_1h"] = (double)_hit_ratio_1h->get_value();
 
     stats["index_queue_max_size"] = (double)_index_queue.get_max_size();
-    stats["index_queue_curr_size"] = (double)_cur_index_queue_element_count_metrics->get_value();
+    stats["index_queue_curr_size"] = (double)_cur_index_queue_cache_size_metrics->get_value();
     stats["index_queue_max_elements"] = (double)_index_queue.get_max_element_size();
     stats["index_queue_curr_elements"] =
             (double)_cur_index_queue_element_count_metrics->get_value();
@@ -2142,14 +2142,14 @@ std::map<std::string, double> BlockFileCache::get_stats() {
             (double)_cur_ttl_cache_lru_queue_element_count_metrics->get_value();
 
     stats["normal_queue_max_size"] = (double)_normal_queue.get_max_size();
-    stats["normal_queue_curr_size"] = (double)_cur_normal_queue_element_count_metrics->get_value();
+    stats["normal_queue_curr_size"] = (double)_cur_normal_queue_cache_size_metrics->get_value();
     stats["normal_queue_max_elements"] = (double)_normal_queue.get_max_element_size();
     stats["normal_queue_curr_elements"] =
             (double)_cur_normal_queue_element_count_metrics->get_value();
 
     stats["disposable_queue_max_size"] = (double)_disposable_queue.get_max_size();
     stats["disposable_queue_curr_size"] =
-            (double)_cur_disposable_queue_element_count_metrics->get_value();
+            (double)_cur_disposable_queue_cache_size_metrics->get_value();
     stats["disposable_queue_max_elements"] = (double)_disposable_queue.get_max_element_size();
     stats["disposable_queue_curr_elements"] =
             (double)_cur_disposable_queue_element_count_metrics->get_value();
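
For readers skimming the hunks above: the change swaps which gauge feeds the *_curr_size keys. Before the fix, both *_curr_size and *_curr_elements read the element-count gauges, so the size stats reported block counts rather than bytes; after the fix, the size keys read the *_cache_size_metrics gauges while the element keys keep reading the element-count gauges. The snippet below is a minimal self-contained sketch of that pairing, not the Doris code itself; the Gauge type and the variable names are illustrative stand-ins for the _cur_*_metrics members.

// Minimal sketch (not Doris code): Gauge and the variable names are
// hypothetical stand-ins for the metric members used in BlockFileCache::get_stats().
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Gauge {
    int64_t value = 0;
    int64_t get_value() const { return value; }
};

int main() {
    Gauge index_queue_cache_size{4096};    // bytes currently held in the queue
    Gauge index_queue_element_count{3};    // number of cached blocks in the queue

    std::map<std::string, double> stats;
    // The fix restores this pairing: size keys read the size gauge,
    // element keys read the element-count gauge.
    stats["index_queue_curr_size"] = (double)index_queue_cache_size.get_value();
    stats["index_queue_curr_elements"] = (double)index_queue_element_count.get_value();

    for (const auto& [k, v] : stats) {
        std::cout << k << " = " << v << "\n";
    }
    return 0;
}
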
diff --git a/be/src/olap/options.cpp b/be/src/olap/options.cpp
index 6e4cb61e3d0..cbdbb220de3 100644
--- a/be/src/olap/options.cpp
+++ b/be/src/olap/options.cpp
@@ -240,6 +240,8 @@ Status parse_conf_cache_paths(const std::string& config_path, std::vector<CacheP
                 total_size = value.GetInt64();
             } else {
                 total_size = 0;
+                LOG(WARNING) << "[FileCache] the value of " << CACHE_TOTAL_SIZE.c_str()
+                             << " is not int64: " << value.GetString() << " , use 0 as default";
             }
         }
         if (config::enable_file_cache_query_limit) {
@@ -249,6 +251,8 @@ Status parse_conf_cache_paths(const std::string& config_path, std::vector<CacheP
                     query_limit_bytes = value.GetInt64();
                 } else {
                     query_limit_bytes = 0;
+                    LOG(WARNING) << "[FileCache] the value of " << CACHE_QUERY_LIMIT_SIZE.c_str()
+                                 << " is not int64: " << value.GetString() << " , use 0 as default";
                 }
             }
         }
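
The options.cpp hunks add a warning when the cache path config's total size or query limit cannot be read as an int64, instead of silently falling back to 0. Below is a minimal standalone sketch of that check, assuming RapidJSON (which the surrounding parse code uses) and a hypothetical key name; std::cerr stands in for glog's LOG(WARNING) so the sketch builds without glog.

// Minimal sketch of the "warn and default to 0" pattern added above.
// Assumptions: RapidJSON is available; "total_size" is an illustrative key;
// std::cerr replaces LOG(WARNING) to avoid a glog dependency.
#include <cstdint>
#include <iostream>
#include <rapidjson/document.h>

static int64_t read_int64_or_warn(const rapidjson::Value& value, const char* key) {
    if (value.IsInt64()) {
        return value.GetInt64();
    }
    std::cerr << "[FileCache] the value of " << key
              << " is not int64, use 0 as default\n";
    return 0;
}

int main() {
    rapidjson::Document doc;
    doc.Parse(R"({"total_size": "102400"})");  // a string, not an int64
    int64_t total_size = read_int64_or_warn(doc["total_size"], "total_size");
    std::cout << "total_size = " << total_size << "\n";  // prints 0
    return 0;
}
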


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]