This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 5970c82edc7 branch-3.0: [fix](filecache) fix load_cache_info_into_memory crash #51684 (#51904)
5970c82edc7 is described below

commit 5970c82edc72e59b07704af9bb3640f902700f08
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Jun 19 16:25:09 2025 +0800

    branch-3.0: [fix](filecache) fix load_cache_info_into_memory crash #51684 (#51904)
    
    Cherry-picked from #51684
    
    Signed-off-by: zhengyu <[email protected]>
    Co-authored-by: zhengyu <[email protected]>
---
 be/src/io/cache/block_file_cache.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/be/src/io/cache/block_file_cache.h b/be/src/io/cache/block_file_cache.h
index a64d339d081..c12b03cc998 100644
--- a/be/src/io/cache/block_file_cache.h
+++ b/be/src/io/cache/block_file_cache.h
@@ -496,7 +496,6 @@ private:
     size_t _max_query_cache_size = 0;
 
     mutable std::mutex _mutex;
-    std::unique_ptr<FileCacheStorage> _storage;
     bool _close {false};
     std::mutex _close_mtx;
     std::condition_variable _close_cv;
@@ -575,6 +574,11 @@ private:
     std::shared_ptr<bvar::LatencyRecorder> _evict_in_advance_latency_us;
     std::shared_ptr<bvar::LatencyRecorder> _recycle_keys_length_recorder;
     std::shared_ptr<bvar::LatencyRecorder> _ttl_gc_latency_us;
+    // Keep _storage last so it is destructed first. Its destructor joins the
+    // async load thread (load_cache_info_into_memory); that thread uses other
+    // members of BlockFileCache, so those members must still be alive when the
+    // join happens, otherwise the load can crash on already-destroyed members.
+    std::unique_ptr<FileCacheStorage> _storage;
 };
 
 } // namespace doris::io
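
For readers unfamiliar with the rationale in the comment above: C++ destroys non-static data members in reverse declaration order, so the member declared last is the first to be destructed. Declaring _storage last means its destructor (which joins the async load thread) runs while the rest of BlockFileCache is still alive. The sketch below is a minimal illustration of that pattern only, not the Doris implementation; Owner, Storage, and the sleeping worker thread are hypothetical stand-ins for BlockFileCache, FileCacheStorage, and load_cache_info_into_memory.

#include <chrono>
#include <memory>
#include <mutex>
#include <thread>

struct Owner;

struct Storage {
    Owner* _owner;
    std::thread _async_load;          // stand-in for the async cache-loading thread
    explicit Storage(Owner* owner);
    ~Storage() {
        if (_async_load.joinable()) {
            _async_load.join();       // joined while Owner's other members are still alive
        }
    }
};

struct Owner {
    std::mutex _mutex;                // a member the async thread uses
    std::unique_ptr<Storage> _storage;  // declared last -> destructed first
    Owner() : _storage(std::make_unique<Storage>(this)) {}
};

Storage::Storage(Owner* owner) : _owner(owner) {
    _async_load = std::thread([this] {
        // Touches a member of Owner; safe because ~Storage joins this thread
        // before _mutex is destroyed.
        std::lock_guard<std::mutex> lock(_owner->_mutex);
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    });
}

int main() {
    Owner owner;
}   // ~Owner: ~Storage runs first and joins the thread, then _mutex is destroyed

Had _storage been declared before _mutex (as before this fix), _mutex would be destroyed before the thread is joined, reproducing the use-after-destruction crash.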


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
