This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 779ca464a5eabef2f8fa82333b19cc80013b9c58
Author: Uniqueyou <[email protected]>
AuthorDate: Thu Mar 7 20:29:28 2024 +0800

    [Fix](Status) Handle returned overall Status correctly (#31692)
    
    Handle returned overall Status correctly: replace `static_cast<void>(...)`
    calls that silently discarded a returned Status with proper error
    propagation (`RETURN_IF_ERROR`, or the newly added `RETURN_FALSE_IF_ERROR`
    in bool-returning functions), and change functions that can never fail
    (e.g. `TabletMeta::serialize`, `put_extra_info_in_page`) to return `void`.
---
 be/src/common/status.h                             |  8 ++++++
 be/src/io/fs/benchmark/fs_benchmark_tool.cpp       | 26 +++++++++--------
 be/src/olap/compaction.cpp                         |  2 +-
 be/src/olap/rowset/beta_rowset_writer.cpp          |  4 +--
 be/src/olap/rowset/segment_creator.cpp             |  2 +-
 .../segment_v2/bloom_filter_index_writer.cpp       |  6 ++--
 be/src/olap/rowset/segment_v2/column_reader.cpp    | 33 ++++++++++++----------
 be/src/olap/rowset/segment_v2/column_reader.h      |  8 +++---
 be/src/olap/rowset/segment_v2/column_writer.cpp    |  5 ++--
 be/src/olap/rowset/segment_v2/column_writer.h      |  4 +--
 .../segment_v2/inverted_index_compaction.cpp       |  2 +-
 .../rowset/segment_v2/inverted_index_reader.cpp    |  4 +--
 be/src/olap/rowset/segment_v2/plain_page.h         |  2 +-
 be/src/olap/rowset/segment_v2/rle_page.h           |  2 +-
 be/src/olap/rowset/segment_v2/segment_writer.cpp   |  2 +-
 be/src/olap/single_replica_compaction.cpp          |  2 +-
 be/src/olap/tablet.cpp                             |  8 +++---
 be/src/olap/tablet_manager.cpp                     |  2 +-
 be/src/olap/tablet_meta.cpp                        |  5 ++--
 be/src/olap/tablet_meta.h                          |  2 +-
 be/src/olap/tablet_meta_manager.cpp                |  2 +-
 be/src/olap/task/engine_clone_task.cpp             |  4 +--
 be/src/olap/task/engine_storage_migration_task.cpp |  2 +-
 be/src/olap/task/index_builder.cpp                 |  2 +-
 be/src/pipeline/exec/aggregation_sink_operator.cpp |  2 +-
 be/src/pipeline/exec/aggregation_sink_operator.h   |  2 +-
 be/src/pipeline/exec/analytic_sink_operator.cpp    |  2 +-
 be/src/pipeline/exec/analytic_source_operator.cpp  | 17 +++++------
 be/src/pipeline/exec/analytic_source_operator.h    |  6 ++--
 be/src/pipeline/exec/datagen_operator.cpp          |  4 +--
 be/src/pipeline/exec/empty_source_operator.h       |  2 +-
 be/src/pipeline/exec/mysql_scan_operator.cpp       |  2 +-
 be/src/pipeline/exec/result_sink_operator.cpp      |  2 +-
 be/src/pipeline/exec/scan_operator.cpp             | 15 ++++------
 be/src/pipeline/exec/scan_operator.h               |  2 +-
 be/src/pipeline/exec/schema_scan_operator.cpp      |  2 +-
 be/src/pipeline/exec/union_source_operator.cpp     |  2 +-
 be/src/pipeline/exec/union_source_operator.h       |  2 +-
 be/src/pipeline/pipeline.cpp                       |  2 +-
 be/src/pipeline/pipeline_fragment_context.cpp      |  4 +--
 .../pipeline_x/pipeline_x_fragment_context.cpp     |  4 +--
 be/src/pipeline/task_scheduler.cpp                 | 12 ++++----
 be/src/runtime/buffer_control_block.cpp            | 11 +++-----
 be/src/runtime/buffer_control_block.h              |  4 +--
 be/src/runtime/result_buffer_mgr.cpp               |  8 ++----
 be/src/runtime/result_buffer_mgr.h                 |  2 +-
 be/src/vec/exec/vaggregation_node.cpp              | 19 +++++--------
 be/src/vec/exec/vaggregation_node.h                |  2 +-
 be/test/http/http_client_test.cpp                  |  2 +-
 49 files changed, 134 insertions(+), 137 deletions(-)

diff --git a/be/src/common/status.h b/be/src/common/status.h
index 356a54f934d..2c3b462b645 100644
--- a/be/src/common/status.h
+++ b/be/src/common/status.h
@@ -571,6 +571,14 @@ inline std::string Status::to_string_no_stack() const {
         }                               \
     } while (false)
 
+#define RETURN_FALSE_IF_ERROR(stmt)   \
+    do {                              \
+        Status status = (stmt);       \
+        if (UNLIKELY(!status.ok())) { \
+            return false;             \
+        }                             \
+    } while (false)
+
 /// @brief Emit a warning if @c to_call returns a bad status.
 #define WARN_IF_ERROR(to_call, warning_prefix)              \
     do {                                                    \
diff --git a/be/src/io/fs/benchmark/fs_benchmark_tool.cpp 
b/be/src/io/fs/benchmark/fs_benchmark_tool.cpp
index 9b7ec178d77..be2fb8fdef6 100644
--- a/be/src/io/fs/benchmark/fs_benchmark_tool.cpp
+++ b/be/src/io/fs/benchmark/fs_benchmark_tool.cpp
@@ -79,7 +79,7 @@ int read_conf(const std::string& conf, std::map<std::string, 
std::string>* conf_
                 std::string val = line.substr(pos + 1);
                 (*conf_map)[key] = val;
             } else {
-                std::cout << "invalid config item: " << line << std::endl;
+                std::cerr << "invalid config item: " << line << std::endl;
                 ok = false;
                 break;
             }
@@ -91,7 +91,7 @@ int read_conf(const std::string& conf, std::map<std::string, 
std::string>* conf_
             std::cout << it->first << " = " << it->second << std::endl;
         }
     } else {
-        std::cout << "failed to open conf file: " << conf << std::endl;
+        std::cerr << "failed to open conf file: " << conf << std::endl;
         return 1;
     }
     return ok ? 0 : 1;
@@ -106,7 +106,7 @@ int main(int argc, char** argv) {
     std::map<std::string, std::string> conf_map;
     int res = read_conf(conf_file, &conf_map);
     if (res != 0) {
-        std::cout << "failed to read conf from file \"conf_file\"" << 
std::endl;
+        std::cerr << "failed to read conf from file \"conf_file\"" << 
std::endl;
         return 1;
     }
 
@@ -115,10 +115,14 @@ int main(int argc, char** argv) {
 
     // init s3 write buffer pool
     std::unique_ptr<doris::ThreadPool> s3_file_upload_thread_pool;
-    static_cast<void>(doris::ThreadPoolBuilder("S3FileUploadThreadPool")
-                              .set_min_threads(num_cores)
-                              .set_max_threads(num_cores)
-                              .build(&s3_file_upload_thread_pool));
+    doris::Status st = doris::ThreadPoolBuilder("S3FileUploadThreadPool")
+                               .set_min_threads(num_cores)
+                               .set_max_threads(num_cores)
+                               .build(&s3_file_upload_thread_pool);
+    if (!st.ok()) {
+        std::cerr << "init s3 write buffer pool failed" << std::endl;
+        return 1;
+    }
 
     try {
         doris::io::MultiBenchmark multi_bm(FLAGS_fs_type, FLAGS_operation, 
std::stoi(FLAGS_threads),
@@ -126,12 +130,12 @@ int main(int argc, char** argv) {
                                            conf_map);
         doris::Status st = multi_bm.init_env();
         if (!st) {
-            std::cout << "init env failed: " << st << std::endl;
+            std::cerr << "init env failed: " << st << std::endl;
             return 1;
         }
         st = multi_bm.init_bms();
         if (!st) {
-            std::cout << "init bms failed: " << st << std::endl;
+            std::cerr << "init bms failed: " << st << std::endl;
             return 1;
         }
 
@@ -140,10 +144,10 @@ int main(int argc, char** argv) {
         benchmark::Shutdown();
 
     } catch (std::invalid_argument const& ex) {
-        std::cout << "std::invalid_argument::what(): " << ex.what() << 
std::endl;
+        std::cerr << "std::invalid_argument::what(): " << ex.what() << 
std::endl;
         return 1;
     } catch (std::out_of_range const& ex) {
-        std::cout << "std::out_of_range::what(): " << ex.what() << std::endl;
+        std::cerr << "std::out_of_range::what(): " << ex.what() << std::endl;
         return 1;
     }
     return 0;
diff --git a/be/src/olap/compaction.cpp b/be/src/olap/compaction.cpp
index c94e6c0b4d5..0ccb1c53aae 100644
--- a/be/src/olap/compaction.cpp
+++ b/be/src/olap/compaction.cpp
@@ -175,7 +175,7 @@ bool Compaction::is_rowset_tidy(std::string& pre_max_key, 
const RowsetSharedPtr&
     // check segment size
     auto beta_rowset = reinterpret_cast<BetaRowset*>(rhs.get());
     std::vector<size_t> segments_size;
-    static_cast<void>(beta_rowset->get_segments_size(&segments_size));
+    RETURN_FALSE_IF_ERROR(beta_rowset->get_segments_size(&segments_size));
     for (auto segment_size : segments_size) {
         // if segment is too small, need to do compaction
         if (segment_size < min_tidy_size) {
diff --git a/be/src/olap/rowset/beta_rowset_writer.cpp 
b/be/src/olap/rowset/beta_rowset_writer.cpp
index 278b02db4d9..88e9f8cd8d5 100644
--- a/be/src/olap/rowset/beta_rowset_writer.cpp
+++ b/be/src/olap/rowset/beta_rowset_writer.cpp
@@ -375,8 +375,8 @@ Status BetaRowsetWriter::_rename_compacted_indices(int64_t 
begin, int64_t end, u
                         ret, errno);
             }
             // Erase the origin index file cache
-            
static_cast<void>(InvertedIndexSearcherCache::instance()->erase(src_idx_path));
-            
static_cast<void>(InvertedIndexSearcherCache::instance()->erase(dst_idx_path));
+            
RETURN_IF_ERROR(InvertedIndexSearcherCache::instance()->erase(src_idx_path));
+            
RETURN_IF_ERROR(InvertedIndexSearcherCache::instance()->erase(dst_idx_path));
         }
     }
     return Status::OK();
diff --git a/be/src/olap/rowset/segment_creator.cpp 
b/be/src/olap/rowset/segment_creator.cpp
index c16d87e3daa..fd506a8373e 100644
--- a/be/src/olap/rowset/segment_creator.cpp
+++ b/be/src/olap/rowset/segment_creator.cpp
@@ -132,7 +132,7 @@ Status 
SegmentFlusher::_expand_variant_to_subcolumns(vectorized::Block& block,
         // => update_schema:   A(bigint), B(double), C(int), D(int)
         std::lock_guard<std::mutex> lock(*(_context->schema_lock));
         TabletSchemaSPtr update_schema;
-        static_cast<void>(vectorized::schema_util::get_least_common_schema(
+        RETURN_IF_ERROR(vectorized::schema_util::get_least_common_schema(
                 {_context->tablet_schema, flush_schema}, nullptr, 
update_schema));
         CHECK_GE(update_schema->num_columns(), flush_schema->num_columns())
                 << "Rowset merge schema columns count is " << 
update_schema->num_columns()
diff --git a/be/src/olap/rowset/segment_v2/bloom_filter_index_writer.cpp 
b/be/src/olap/rowset/segment_v2/bloom_filter_index_writer.cpp
index e7e3e5e7f6a..e8bab57003a 100644
--- a/be/src/olap/rowset/segment_v2/bloom_filter_index_writer.cpp
+++ b/be/src/olap/rowset/segment_v2/bloom_filter_index_writer.cpp
@@ -139,7 +139,7 @@ public:
         RETURN_IF_ERROR(bf_writer.init());
         for (auto& bf : _bfs) {
             Slice data(bf->data(), bf->size());
-            static_cast<void>(bf_writer.add(&data));
+            RETURN_IF_ERROR(bf_writer.add(&data));
         }
         RETURN_IF_ERROR(bf_writer.finish(meta->mutable_bloom_filter()));
         return Status::OK();
@@ -220,7 +220,7 @@ Status 
PrimaryKeyBloomFilterIndexWriterImpl::finish(io::FileWriter* file_writer,
     RETURN_IF_ERROR(bf_writer.init());
     for (auto& bf : _bfs) {
         Slice data(bf->data(), bf->size());
-        static_cast<void>(bf_writer.add(&data));
+        RETURN_IF_ERROR(bf_writer.add(&data));
     }
     RETURN_IF_ERROR(bf_writer.finish(meta->mutable_bloom_filter()));
     return Status::OK();
@@ -277,7 +277,7 @@ Status 
NGramBloomFilterIndexWriterImpl::finish(io::FileWriter* file_writer,
     RETURN_IF_ERROR(bf_writer.init());
     for (auto& bf : _bfs) {
         Slice data(bf->data(), bf->size());
-        static_cast<void>(bf_writer.add(&data));
+        RETURN_IF_ERROR(bf_writer.add(&data));
     }
     RETURN_IF_ERROR(bf_writer.finish(meta->mutable_bloom_filter()));
     return Status::OK();
diff --git a/be/src/olap/rowset/segment_v2/column_reader.cpp 
b/be/src/olap/rowset/segment_v2/column_reader.cpp
index d75c1ef1753..65896044dd0 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/column_reader.cpp
@@ -306,7 +306,8 @@ Status ColumnReader::next_batch_of_zone_map(size_t* n, 
vectorized::MutableColumn
     FieldType type = _type_info->type();
     std::unique_ptr<WrapperField> min_value(WrapperField::create_by_type(type, 
_meta_length));
     std::unique_ptr<WrapperField> max_value(WrapperField::create_by_type(type, 
_meta_length));
-    _parse_zone_map_skip_null(*_segment_zone_map, min_value.get(), 
max_value.get());
+    RETURN_IF_ERROR(
+            _parse_zone_map_skip_null(*_segment_zone_map, min_value.get(), 
max_value.get()));
 
     dst->reserve(*n);
     bool is_string = is_olap_string_type(type);
@@ -346,7 +347,7 @@ bool ColumnReader::match_condition(const 
AndBlockColumnPredicate* col_predicates
     FieldType type = _type_info->type();
     std::unique_ptr<WrapperField> min_value(WrapperField::create_by_type(type, 
_meta_length));
     std::unique_ptr<WrapperField> max_value(WrapperField::create_by_type(type, 
_meta_length));
-    _parse_zone_map(*_segment_zone_map, min_value.get(), max_value.get());
+    RETURN_FALSE_IF_ERROR(_parse_zone_map(*_segment_zone_map, min_value.get(), 
max_value.get()));
 
     return _zone_map_match_condition(*_segment_zone_map, min_value.get(), 
max_value.get(),
                                      col_predicates);
@@ -361,7 +362,7 @@ bool 
ColumnReader::prune_predicates_by_zone_map(std::vector<ColumnPredicate*>& p
     FieldType type = _type_info->type();
     std::unique_ptr<WrapperField> min_value(WrapperField::create_by_type(type, 
_meta_length));
     std::unique_ptr<WrapperField> max_value(WrapperField::create_by_type(type, 
_meta_length));
-    _parse_zone_map(*_segment_zone_map, min_value.get(), max_value.get());
+    RETURN_FALSE_IF_ERROR(_parse_zone_map(*_segment_zone_map, min_value.get(), 
max_value.get()));
 
     auto pruned = false;
     for (auto it = predicates.begin(); it != predicates.end();) {
@@ -377,12 +378,12 @@ bool 
ColumnReader::prune_predicates_by_zone_map(std::vector<ColumnPredicate*>& p
     return pruned;
 }
 
-void ColumnReader::_parse_zone_map(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
-                                   WrapperField* max_value_container) const {
+Status ColumnReader::_parse_zone_map(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
+                                     WrapperField* max_value_container) const {
     // min value and max value are valid if has_not_null is true
     if (zone_map.has_not_null()) {
-        static_cast<void>(min_value_container->from_string(zone_map.min()));
-        static_cast<void>(max_value_container->from_string(zone_map.max()));
+        RETURN_IF_ERROR(min_value_container->from_string(zone_map.min()));
+        RETURN_IF_ERROR(max_value_container->from_string(zone_map.max()));
     }
     // for compatible original Cond eval logic
     if (zone_map.has_null()) {
@@ -393,21 +394,23 @@ void ColumnReader::_parse_zone_map(const ZoneMapPB& 
zone_map, WrapperField* min_
             max_value_container->set_null();
         }
     }
+    return Status::OK();
 }
 
-void ColumnReader::_parse_zone_map_skip_null(const ZoneMapPB& zone_map,
-                                             WrapperField* min_value_container,
-                                             WrapperField* 
max_value_container) const {
+Status ColumnReader::_parse_zone_map_skip_null(const ZoneMapPB& zone_map,
+                                               WrapperField* 
min_value_container,
+                                               WrapperField* 
max_value_container) const {
     // min value and max value are valid if has_not_null is true
     if (zone_map.has_not_null()) {
-        static_cast<void>(min_value_container->from_string(zone_map.min()));
-        static_cast<void>(max_value_container->from_string(zone_map.max()));
+        RETURN_IF_ERROR(min_value_container->from_string(zone_map.min()));
+        RETURN_IF_ERROR(max_value_container->from_string(zone_map.max()));
     }
 
     if (!zone_map.has_not_null()) {
         min_value_container->set_null();
         max_value_container->set_null();
     }
+    return Status::OK();
 }
 
 bool ColumnReader::_zone_map_match_condition(const ZoneMapPB& zone_map,
@@ -440,7 +443,7 @@ Status ColumnReader::_get_filtered_pages(
         if (zone_maps[i].pass_all()) {
             page_indexes->push_back(i);
         } else {
-            _parse_zone_map(zone_maps[i], min_value.get(), max_value.get());
+            RETURN_IF_ERROR(_parse_zone_map(zone_maps[i], min_value.get(), 
max_value.get()));
             if (_zone_map_match_condition(zone_maps[i], min_value.get(), 
max_value.get(),
                                           col_predicates)) {
                 bool should_read = true;
@@ -1016,7 +1019,7 @@ Status FileColumnIterator::init(const 
ColumnIteratorOptions& opts) {
         // it has bad impact on primary key query. For example, select * from 
table where pk = 1, and
         // the table has 2000 columns.
         if (dict_encoding_type == ColumnReader::UNKNOWN_DICT_ENCODING && 
opts.is_predicate_column) {
-            static_cast<void>(seek_to_ordinal(_reader->num_rows() - 1));
+            RETURN_IF_ERROR(seek_to_ordinal(_reader->num_rows() - 1));
             _is_all_dict_encoding = _page.is_dict_encoding;
             _reader->set_dict_encoding_type(_is_all_dict_encoding
                                                     ? 
ColumnReader::ALL_DICT_ENCODING
@@ -1199,7 +1202,7 @@ Status FileColumnIterator::read_by_rowids(const rowid_t* 
rowids, const size_t co
                 }
 
                 if (!is_null) {
-                    static_cast<void>(
+                    RETURN_IF_ERROR(
                             
_page.data_decoder->seek_to_position_in_page(origin_index + this_run));
                 }
 
diff --git a/be/src/olap/rowset/segment_v2/column_reader.h 
b/be/src/olap/rowset/segment_v2/column_reader.h
index 78cd0164533..edfe4fbfce7 100644
--- a/be/src/olap/rowset/segment_v2/column_reader.h
+++ b/be/src/olap/rowset/segment_v2/column_reader.h
@@ -207,11 +207,11 @@ private:
                                    WrapperField* max_value_container,
                                    const AndBlockColumnPredicate* 
col_predicates) const;
 
-    void _parse_zone_map(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
-                         WrapperField* max_value_container) const;
+    Status _parse_zone_map(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
+                           WrapperField* max_value_container) const;
 
-    void _parse_zone_map_skip_null(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
-                                   WrapperField* max_value_container) const;
+    Status _parse_zone_map_skip_null(const ZoneMapPB& zone_map, WrapperField* 
min_value_container,
+                                     WrapperField* max_value_container) const;
 
     Status _get_filtered_pages(const AndBlockColumnPredicate* col_predicates,
                                const std::vector<const ColumnPredicate*>* 
delete_predicates,
diff --git a/be/src/olap/rowset/segment_v2/column_writer.cpp 
b/be/src/olap/rowset/segment_v2/column_writer.cpp
index 2967c2a8215..99299e3ce7a 100644
--- a/be/src/olap/rowset/segment_v2/column_writer.cpp
+++ b/be/src/olap/rowset/segment_v2/column_writer.cpp
@@ -711,7 +711,7 @@ Status ScalarColumnWriter::finish_current_page() {
     data_page_footer->set_num_values(_next_rowid - _first_rowid);
     data_page_footer->set_nullmap_size(nullmap.slice().size);
     if (_new_page_callback != nullptr) {
-        
static_cast<void>(_new_page_callback->put_extra_info_in_page(data_page_footer));
+        _new_page_callback->put_extra_info_in_page(data_page_footer);
     }
     // trying to compress page body
     OwnedSlice compressed_body;
@@ -770,9 +770,8 @@ Status OffsetColumnWriter::append_data(const uint8_t** ptr, 
size_t num_rows) {
     return Status::OK();
 }
 
-Status OffsetColumnWriter::put_extra_info_in_page(DataPageFooterPB* footer) {
+void OffsetColumnWriter::put_extra_info_in_page(DataPageFooterPB* footer) {
     footer->set_next_array_item_ordinal(_next_offset);
-    return Status::OK();
 }
 
 StructColumnWriter::StructColumnWriter(
diff --git a/be/src/olap/rowset/segment_v2/column_writer.h 
b/be/src/olap/rowset/segment_v2/column_writer.h
index 77a08607df5..ae22245b14e 100644
--- a/be/src/olap/rowset/segment_v2/column_writer.h
+++ b/be/src/olap/rowset/segment_v2/column_writer.h
@@ -164,7 +164,7 @@ private:
 class FlushPageCallback {
 public:
     virtual ~FlushPageCallback() = default;
-    virtual Status put_extra_info_in_page(DataPageFooterPB* footer) { return 
Status::OK(); }
+    virtual void put_extra_info_in_page(DataPageFooterPB* footer) {}
 };
 
 // Encode one column's data into some memory slice.
@@ -280,7 +280,7 @@ public:
     Status append_data(const uint8_t** ptr, size_t num_rows) override;
 
 private:
-    Status put_extra_info_in_page(DataPageFooterPB* footer) override;
+    void put_extra_info_in_page(DataPageFooterPB* footer) override;
 
     uint64_t _next_offset;
 };
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp 
b/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
index 240d10edf67..ea019ef0adf 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
+++ b/be/src/olap/rowset/segment_v2/inverted_index_compaction.cpp
@@ -92,7 +92,7 @@ Status compact_column(int32_t index_id, int src_segment_num, 
int dest_segment_nu
     }
 
     // delete temporary index_writer_path
-    static_cast<void>(fs->delete_directory(index_writer_path.c_str()));
+    RETURN_IF_ERROR(fs->delete_directory(index_writer_path.c_str()));
     return Status::OK();
 }
 } // namespace doris::segment_v2
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp 
b/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp
index b8475cbf509..c83158ed269 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp
+++ b/be/src/olap/rowset/segment_v2/inverted_index_reader.cpp
@@ -426,8 +426,8 @@ Status 
StringTypeInvertedIndexReader::query(OlapReaderStatistics* stats,
         // try to reuse index_searcher's directory to read null_bitmap to cache
         // to avoid open directory additionally for null_bitmap
         InvertedIndexQueryCacheHandle null_bitmap_cache_handle;
-        static_cast<void>(read_null_bitmap(&null_bitmap_cache_handle,
-                                           
(*searcher_ptr)->getReader()->directory()));
+        RETURN_IF_ERROR(read_null_bitmap(&null_bitmap_cache_handle,
+                                         
(*searcher_ptr)->getReader()->directory()));
 
         try {
             switch (query_type) {
diff --git a/be/src/olap/rowset/segment_v2/plain_page.h 
b/be/src/olap/rowset/segment_v2/plain_page.h
index d1a62dc3312..29cec0e8795 100644
--- a/be/src/olap/rowset/segment_v2/plain_page.h
+++ b/be/src/olap/rowset/segment_v2/plain_page.h
@@ -125,7 +125,7 @@ public:
 
         _parsed = true;
 
-        static_cast<void>(seek_to_position_in_page(0));
+        RETURN_IF_ERROR(seek_to_position_in_page(0));
         return Status::OK();
     }
 
diff --git a/be/src/olap/rowset/segment_v2/rle_page.h 
b/be/src/olap/rowset/segment_v2/rle_page.h
index e199f37bc5b..bdc94a7080a 100644
--- a/be/src/olap/rowset/segment_v2/rle_page.h
+++ b/be/src/olap/rowset/segment_v2/rle_page.h
@@ -180,7 +180,7 @@ public:
         _rle_decoder = RleDecoder<CppType>((uint8_t*)_data.data + 
RLE_PAGE_HEADER_SIZE,
                                            _data.size - RLE_PAGE_HEADER_SIZE, 
_bit_width);
 
-        static_cast<void>(seek_to_position_in_page(0));
+        RETURN_IF_ERROR(seek_to_position_in_page(0));
         return Status::OK();
     }
 
diff --git a/be/src/olap/rowset/segment_v2/segment_writer.cpp 
b/be/src/olap/rowset/segment_v2/segment_writer.cpp
index f054a79db50..61d9f410be5 100644
--- a/be/src/olap/rowset/segment_v2/segment_writer.cpp
+++ b/be/src/olap/rowset/segment_v2/segment_writer.cpp
@@ -666,7 +666,7 @@ Status 
SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
                 auto default_value = 
_tablet_schema->column(cids_missing[i]).default_value();
                 vectorized::ReadBuffer 
rb(const_cast<char*>(default_value.c_str()),
                                           default_value.size());
-                
static_cast<void>(old_value_block.get_by_position(i).type->from_string(
+                
RETURN_IF_ERROR(old_value_block.get_by_position(i).type->from_string(
                         rb, mutable_default_value_columns[i].get()));
             }
         }
diff --git a/be/src/olap/single_replica_compaction.cpp 
b/be/src/olap/single_replica_compaction.cpp
index f92ffb8e12e..038c3893497 100644
--- a/be/src/olap/single_replica_compaction.cpp
+++ b/be/src/olap/single_replica_compaction.cpp
@@ -572,7 +572,7 @@ Status SingleReplicaCompaction::_finish_clone(const string& 
clone_dir,
             for (auto& file : linked_success_files) {
                 paths.emplace_back(file);
             }
-            
static_cast<void>(io::global_local_filesystem()->batch_delete(paths));
+            
RETURN_IF_ERROR(io::global_local_filesystem()->batch_delete(paths));
         }
     }
     // clear clone dir
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index 8a761406ca6..bfe31843ed1 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -1599,8 +1599,8 @@ bool Tablet::do_tablet_meta_checkpoint() {
         }
         if (RowsetMetaManager::check_rowset_meta(_data_dir->get_meta(), 
tablet_uid(),
                                                  rs_meta->rowset_id())) {
-            static_cast<void>(RowsetMetaManager::remove(_data_dir->get_meta(), 
tablet_uid(),
-                                                        rs_meta->rowset_id()));
+            
RETURN_FALSE_IF_ERROR(RowsetMetaManager::remove(_data_dir->get_meta(), 
tablet_uid(),
+                                                            
rs_meta->rowset_id()));
             VLOG_NOTICE << "remove rowset id from meta store because it is 
already persistent with "
                         << "tablet meta, rowset_id=" << rs_meta->rowset_id();
         }
@@ -1615,8 +1615,8 @@ bool Tablet::do_tablet_meta_checkpoint() {
         }
         if (RowsetMetaManager::check_rowset_meta(_data_dir->get_meta(), 
tablet_uid(),
                                                  rs_meta->rowset_id())) {
-            static_cast<void>(RowsetMetaManager::remove(_data_dir->get_meta(), 
tablet_uid(),
-                                                        rs_meta->rowset_id()));
+            
RETURN_FALSE_IF_ERROR(RowsetMetaManager::remove(_data_dir->get_meta(), 
tablet_uid(),
+                                                            
rs_meta->rowset_id()));
             VLOG_NOTICE << "remove rowset id from meta store because it is 
already persistent with "
                         << "tablet meta, rowset_id=" << rs_meta->rowset_id();
         }
diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp
index 6751225d86e..baafa7f3c70 100644
--- a/be/src/olap/tablet_manager.cpp
+++ b/be/src/olap/tablet_manager.cpp
@@ -969,7 +969,7 @@ Status TabletManager::load_tablet_from_dir(DataDir* store, 
TTabletId tablet_id,
     // should change tablet uid when tablet object changed
     tablet_meta->set_tablet_uid(std::move(tablet_uid));
     std::string meta_binary;
-    RETURN_IF_ERROR(tablet_meta->serialize(&meta_binary));
+    tablet_meta->serialize(&meta_binary);
     RETURN_NOT_OK_STATUS_WITH_WARN(
             load_tablet_from_meta(store, tablet_id, schema_hash, meta_binary, 
true, force, restore,
                                   true),
diff --git a/be/src/olap/tablet_meta.cpp b/be/src/olap/tablet_meta.cpp
index 1bb007ca597..e029c0b1e81 100644
--- a/be/src/olap/tablet_meta.cpp
+++ b/be/src/olap/tablet_meta.cpp
@@ -450,7 +450,7 @@ Status TabletMeta::_save_meta(DataDir* data_dir) {
     string meta_binary;
 
     auto t1 = MonotonicMicros();
-    RETURN_IF_ERROR(serialize(&meta_binary));
+    serialize(&meta_binary);
     auto t2 = MonotonicMicros();
     Status status = TabletMetaManager::save(data_dir, tablet_id(), 
schema_hash(), meta_binary);
     if (!status.ok()) {
@@ -467,7 +467,7 @@ Status TabletMeta::_save_meta(DataDir* data_dir) {
     return status;
 }
 
-Status TabletMeta::serialize(string* meta_binary) {
+void TabletMeta::serialize(string* meta_binary) {
     TabletMetaPB tablet_meta_pb;
     to_meta_pb(&tablet_meta_pb);
     if (tablet_meta_pb.partition_id() <= 0) {
@@ -484,7 +484,6 @@ Status TabletMeta::serialize(string* meta_binary) {
     if (!serialize_success) {
         LOG(FATAL) << "failed to serialize meta " << tablet_id();
     }
-    return Status::OK();
 }
 
 Status TabletMeta::deserialize(const string& meta_binary) {
diff --git a/be/src/olap/tablet_meta.h b/be/src/olap/tablet_meta.h
index a7e284b420f..69b564a3396 100644
--- a/be/src/olap/tablet_meta.h
+++ b/be/src/olap/tablet_meta.h
@@ -125,7 +125,7 @@ public:
                                                   int64_t tablet_id);
     Status save_meta(DataDir* data_dir);
 
-    Status serialize(std::string* meta_binary);
+    void serialize(std::string* meta_binary);
     Status deserialize(const std::string& meta_binary);
     void init_from_pb(const TabletMetaPB& tablet_meta_pb);
     // Init `RowsetMeta._fs` if rowset is local.
diff --git a/be/src/olap/tablet_meta_manager.cpp 
b/be/src/olap/tablet_meta_manager.cpp
index 288da455e6a..3678bea0481 100644
--- a/be/src/olap/tablet_meta_manager.cpp
+++ b/be/src/olap/tablet_meta_manager.cpp
@@ -92,7 +92,7 @@ Status TabletMetaManager::save(DataDir* store, TTabletId 
tablet_id, TSchemaHash
                                TabletMetaSharedPtr tablet_meta, const string& 
header_prefix) {
     std::string key = fmt::format("{}{}_{}", header_prefix, tablet_id, 
schema_hash);
     std::string value;
-    static_cast<void>(tablet_meta->serialize(&value));
+    tablet_meta->serialize(&value);
     if (tablet_meta->partition_id() <= 0) {
         LOG(WARNING) << "invalid partition id " << tablet_meta->partition_id() 
<< " tablet "
                      << tablet_meta->tablet_id();
diff --git a/be/src/olap/task/engine_clone_task.cpp 
b/be/src/olap/task/engine_clone_task.cpp
index 8194b2f1756..b0b2e3e3873 100644
--- a/be/src/olap/task/engine_clone_task.cpp
+++ b/be/src/olap/task/engine_clone_task.cpp
@@ -225,7 +225,7 @@ Status EngineCloneTask::_do_clone() {
         if (missed_versions.empty()) {
             LOG(INFO) << "missed version size = 0, skip clone and return 
success. tablet_id="
                       << _clone_req.tablet_id << " replica_id=" << 
_clone_req.replica_id;
-            static_cast<void>(_set_tablet_info(is_new_tablet));
+            RETURN_IF_ERROR(_set_tablet_info(is_new_tablet));
             return Status::OK();
         }
 
@@ -289,7 +289,7 @@ Status EngineCloneTask::_do_clone() {
         // clone success, delete .hdr file because tablet meta is stored in 
rocksdb
         string header_path =
                 TabletMeta::construct_header_file_path(tablet_dir, 
_clone_req.tablet_id);
-        
static_cast<void>(io::global_local_filesystem()->delete_file(header_path));
+        
RETURN_IF_ERROR(io::global_local_filesystem()->delete_file(header_path));
     }
     return _set_tablet_info(is_new_tablet);
 }
diff --git a/be/src/olap/task/engine_storage_migration_task.cpp 
b/be/src/olap/task/engine_storage_migration_task.cpp
index efa85406c69..f9227a903f4 100644
--- a/be/src/olap/task/engine_storage_migration_task.cpp
+++ b/be/src/olap/task/engine_storage_migration_task.cpp
@@ -310,7 +310,7 @@ Status EngineStorageMigrationTask::_migrate() {
 
     if (!res.ok()) {
         // we should remove the dir directly for avoid disk full of junk data, 
and it's safe to remove
-        
static_cast<void>(io::global_local_filesystem()->delete_directory(full_path));
+        
RETURN_IF_ERROR(io::global_local_filesystem()->delete_directory(full_path));
     }
     return res;
 }
diff --git a/be/src/olap/task/index_builder.cpp 
b/be/src/olap/task/index_builder.cpp
index 96f27e5d165..5b6b73f95aa 100644
--- a/be/src/olap/task/index_builder.cpp
+++ b/be/src/olap/task/index_builder.cpp
@@ -156,7 +156,7 @@ Status IndexBuilder::update_inverted_index_info() {
         
rowset_meta->set_segments_overlap(input_rowset_meta->segments_overlap());
         rowset_meta->set_rowset_state(input_rowset_meta->rowset_state());
         std::vector<KeyBoundsPB> key_bounds;
-        static_cast<void>(input_rowset->get_segments_key_bounds(&key_bounds));
+        RETURN_IF_ERROR(input_rowset->get_segments_key_bounds(&key_bounds));
         rowset_meta->set_segments_key_bounds(key_bounds);
         auto output_rowset = output_rs_writer->manual_build(rowset_meta);
         if (input_rowset_meta->has_delete_predicate()) {
diff --git a/be/src/pipeline/exec/aggregation_sink_operator.cpp 
b/be/src/pipeline/exec/aggregation_sink_operator.cpp
index 9871711be23..e718bb41495 100644
--- a/be/src/pipeline/exec/aggregation_sink_operator.cpp
+++ b/be/src/pipeline/exec/aggregation_sink_operator.cpp
@@ -801,7 +801,7 @@ Status AggSinkOperatorX::sink(doris::RuntimeState* state, 
vectorized::Block* in_
     }
     if (eos) {
         if (local_state._shared_state->spill_context.has_data) {
-            static_cast<void>(local_state.try_spill_disk(true));
+            RETURN_IF_ERROR(local_state.try_spill_disk(true));
             
RETURN_IF_ERROR(local_state._shared_state->spill_context.prepare_for_reading());
         }
         local_state._dependency->set_ready_to_read();
diff --git a/be/src/pipeline/exec/aggregation_sink_operator.h 
b/be/src/pipeline/exec/aggregation_sink_operator.h
index 8077d78d993..29dbf611698 100644
--- a/be/src/pipeline/exec/aggregation_sink_operator.h
+++ b/be/src/pipeline/exec/aggregation_sink_operator.h
@@ -245,7 +245,7 @@ protected:
             if (blocks_rows[i] == 0) {
                 /// Here write one empty block to ensure there are enough 
blocks in the file,
                 /// blocks' count should be equal with partition_count.
-                static_cast<void>(writer->write(block_to_write));
+                RETURN_IF_ERROR(writer->write(block_to_write));
                 continue;
             }
 
diff --git a/be/src/pipeline/exec/analytic_sink_operator.cpp 
b/be/src/pipeline/exec/analytic_sink_operator.cpp
index 2468f956acf..26f0b0812f7 100644
--- a/be/src/pipeline/exec/analytic_sink_operator.cpp
+++ b/be/src/pipeline/exec/analytic_sink_operator.cpp
@@ -226,7 +226,7 @@ Status AnalyticSinkOperatorX::init(const TPlanNode& tnode, 
RuntimeState* state)
 
 Status AnalyticSinkOperatorX::prepare(RuntimeState* state) {
     for (const auto& ctx : _agg_expr_ctxs) {
-        static_cast<void>(vectorized::VExpr::prepare(ctx, state, 
_child_x->row_desc()));
+        RETURN_IF_ERROR(vectorized::VExpr::prepare(ctx, state, 
_child_x->row_desc()));
     }
     if (!_partition_by_eq_expr_ctxs.empty() || 
!_order_by_eq_expr_ctxs.empty()) {
         vector<TTupleId> tuple_ids;
diff --git a/be/src/pipeline/exec/analytic_source_operator.cpp 
b/be/src/pipeline/exec/analytic_source_operator.cpp
index cb98b2e6466..d8befd152a2 100644
--- a/be/src/pipeline/exec/analytic_source_operator.cpp
+++ b/be/src/pipeline/exec/analytic_source_operator.cpp
@@ -236,7 +236,7 @@ Status AnalyticLocalState::init(RuntimeState* state, 
LocalStateInfo& info) {
             std::bind<void>(&AnalyticLocalState::_execute_for_win_func, this, 
std::placeholders::_1,
                             std::placeholders::_2, std::placeholders::_3, 
std::placeholders::_4);
 
-    RETURN_IF_CATCH_EXCEPTION(static_cast<void>(_create_agg_status()));
+    _create_agg_status();
     return Status::OK();
 }
 
@@ -248,7 +248,7 @@ void AnalyticLocalState::_reset_agg_status() {
     }
 }
 
-Status AnalyticLocalState::_create_agg_status() {
+void AnalyticLocalState::_create_agg_status() {
     for (size_t i = 0; i < _agg_functions_size; ++i) {
         try {
             _agg_functions[i]->create(
@@ -264,19 +264,17 @@ Status AnalyticLocalState::_create_agg_status() {
         }
     }
     _agg_functions_created = true;
-    return Status::OK();
 }
 
-Status AnalyticLocalState::_destroy_agg_status() {
+void AnalyticLocalState::_destroy_agg_status() {
     if (UNLIKELY(_fn_place_ptr == nullptr || !_agg_functions_created)) {
-        return Status::OK();
+        return;
     }
     for (size_t i = 0; i < _agg_functions_size; ++i) {
         _agg_functions[i]->destroy(
                 _fn_place_ptr +
                 
_parent->cast<AnalyticSourceOperatorX>()._offsets_of_aggregate_states[i]);
     }
-    return Status::OK();
 }
 
 //now is execute for lead/lag row_number/rank/dense_rank/ntile functions
@@ -417,7 +415,7 @@ void AnalyticLocalState::_update_order_by_range() {
     }
 }
 
-Status AnalyticLocalState::init_result_columns() {
+void AnalyticLocalState::init_result_columns() {
     if (!_window_end_position) {
         _result_window_columns.resize(_agg_functions_size);
         for (size_t i = 0; i < _agg_functions_size; ++i) {
@@ -425,7 +423,6 @@ Status AnalyticLocalState::init_result_columns() {
                     _agg_functions[i]->data_type()->create_column(); //return 
type
         }
     }
-    return Status::OK();
 }
 
 //calculate pos have arrive partition end, so it's needed to init next 
partition, and update the boundary of partition
@@ -536,7 +533,7 @@ Status AnalyticSourceOperatorX::get_block(RuntimeState* 
state, vectorized::Block
         }
         local_state._next_partition =
                 
local_state.init_next_partition(local_state._shared_state->found_partition_end);
-        static_cast<void>(local_state.init_result_columns());
+        local_state.init_result_columns();
         size_t current_block_rows =
                 
local_state._shared_state->input_blocks[local_state._output_block_index].rows();
         static_cast<void>(local_state._executor.get_next(current_block_rows));
@@ -558,7 +555,7 @@ Status AnalyticLocalState::close(RuntimeState* state) {
         return Status::OK();
     }
 
-    static_cast<void>(_destroy_agg_status());
+    _destroy_agg_status();
     _agg_arena_pool = nullptr;
 
     std::vector<vectorized::MutableColumnPtr> tmp_result_window_columns;
diff --git a/be/src/pipeline/exec/analytic_source_operator.h 
b/be/src/pipeline/exec/analytic_source_operator.h
index eeb790ebf94..cdfe2644f45 100644
--- a/be/src/pipeline/exec/analytic_source_operator.h
+++ b/be/src/pipeline/exec/analytic_source_operator.h
@@ -55,7 +55,7 @@ public:
     Status init(RuntimeState* state, LocalStateInfo& info) override;
     Status close(RuntimeState* state) override;
 
-    Status init_result_columns();
+    void init_result_columns();
 
     Status output_current_block(vectorized::Block* block);
 
@@ -89,8 +89,8 @@ private:
     bool _whether_need_next_partition(vectorized::BlockRowPos& 
found_partition_end);
 
     void _reset_agg_status();
-    Status _create_agg_status();
-    Status _destroy_agg_status();
+    void _create_agg_status();
+    void _destroy_agg_status();
 
     friend class AnalyticSourceOperatorX;
 
diff --git a/be/src/pipeline/exec/datagen_operator.cpp 
b/be/src/pipeline/exec/datagen_operator.cpp
index 6c1310acf5b..4fbe21f71d5 100644
--- a/be/src/pipeline/exec/datagen_operator.cpp
+++ b/be/src/pipeline/exec/datagen_operator.cpp
@@ -40,7 +40,7 @@ Status DataGenOperator::open(RuntimeState* state) {
 
 Status DataGenOperator::close(RuntimeState* state) {
     RETURN_IF_ERROR(SourceOperator::close(state));
-    static_cast<void>(_node->close(state));
+    RETURN_IF_ERROR(_node->close(state));
     return Status::OK();
 }
 
@@ -108,7 +108,7 @@ Status DataGenLocalState::close(RuntimeState* state) {
     if (_closed) {
         return Status::OK();
     }
-    static_cast<void>(_table_func->close(state));
+    RETURN_IF_ERROR(_table_func->close(state));
     return PipelineXLocalState<>::close(state);
 }
 
diff --git a/be/src/pipeline/exec/empty_source_operator.h 
b/be/src/pipeline/exec/empty_source_operator.h
index b352f7cab29..b85d2b1a2ca 100644
--- a/be/src/pipeline/exec/empty_source_operator.h
+++ b/be/src/pipeline/exec/empty_source_operator.h
@@ -74,7 +74,7 @@ public:
     Status sink(RuntimeState*, vectorized::Block*, SourceState) override { 
return Status::OK(); }
 
     Status close(RuntimeState* state) override {
-        static_cast<void>(_exec_node->close(state));
+        RETURN_IF_ERROR(_exec_node->close(state));
         return Status::OK();
     }
 
diff --git a/be/src/pipeline/exec/mysql_scan_operator.cpp 
b/be/src/pipeline/exec/mysql_scan_operator.cpp
index d032a606396..7ef6170d152 100644
--- a/be/src/pipeline/exec/mysql_scan_operator.cpp
+++ b/be/src/pipeline/exec/mysql_scan_operator.cpp
@@ -30,7 +30,7 @@ Status MysqlScanOperator::open(RuntimeState* state) {
 
 Status MysqlScanOperator::close(RuntimeState* state) {
     RETURN_IF_ERROR(SourceOperator::close(state));
-    static_cast<void>(_node->close(state));
+    RETURN_IF_ERROR(_node->close(state));
     return Status::OK();
 }
 
diff --git a/be/src/pipeline/exec/result_sink_operator.cpp 
b/be/src/pipeline/exec/result_sink_operator.cpp
index ddaf1cd4a2b..b89ce4adb2e 100644
--- a/be/src/pipeline/exec/result_sink_operator.cpp
+++ b/be/src/pipeline/exec/result_sink_operator.cpp
@@ -180,7 +180,7 @@ Status ResultSinkLocalState::close(RuntimeState* state, 
Status exec_status) {
         if (_writer) {
             _sender->update_return_rows(_writer->get_written_rows());
         }
-        static_cast<void>(_sender->close(final_status));
+        RETURN_IF_ERROR(_sender->close(final_status));
     }
     state->exec_env()->result_mgr()->cancel_at_time(
             time(nullptr) + config::result_buffer_cancelled_interval_time,
diff --git a/be/src/pipeline/exec/scan_operator.cpp 
b/be/src/pipeline/exec/scan_operator.cpp
index 2c809ffdfe3..987034a76fb 100644
--- a/be/src/pipeline/exec/scan_operator.cpp
+++ b/be/src/pipeline/exec/scan_operator.cpp
@@ -372,9 +372,8 @@ Status ScanLocalState<Derived>::_normalize_predicate(
 
             if (pdt == vectorized::VScanNode::PushDownType::UNACCEPTABLE &&
                 TExprNodeType::COMPOUND_PRED == cur_expr->node_type()) {
-                static_cast<void>(_normalize_compound_predicate(
-                        cur_expr, context, &pdt, _is_runtime_filter_predicate, 
in_predicate_checker,
-                        eq_predicate_checker));
+                _normalize_compound_predicate(cur_expr, context, &pdt, 
_is_runtime_filter_predicate,
+                                              in_predicate_checker, 
eq_predicate_checker);
                 output_expr = conjunct_expr_root; // remaining in conjunct tree
                 return Status::OK();
             }
@@ -1011,7 +1010,7 @@ Status 
ScanLocalState<Derived>::_normalize_noneq_binary_predicate(
 }
 
 template <typename Derived>
-Status ScanLocalState<Derived>::_normalize_compound_predicate(
+void ScanLocalState<Derived>::_normalize_compound_predicate(
         vectorized::VExpr* expr, vectorized::VExprContext* expr_ctx,
         vectorized::VScanNode::PushDownType* pdt, bool 
_is_runtime_filter_predicate,
         const std::function<bool(const vectorized::VExprSPtrs&,
@@ -1070,14 +1069,12 @@ Status 
ScanLocalState<Derived>::_normalize_compound_predicate(
                     _compound_value_ranges.emplace_back(active_range);
                 }
             } else if (TExprNodeType::COMPOUND_PRED == 
child_expr->node_type()) {
-                static_cast<void>(_normalize_compound_predicate(
-                        child_expr, expr_ctx, pdt, 
_is_runtime_filter_predicate,
-                        in_predicate_checker, eq_predicate_checker));
+                _normalize_compound_predicate(child_expr, expr_ctx, pdt,
+                                              _is_runtime_filter_predicate, 
in_predicate_checker,
+                                              eq_predicate_checker);
             }
         }
     }
-
-    return Status::OK();
 }
 
 template <typename Derived>
diff --git a/be/src/pipeline/exec/scan_operator.h 
b/be/src/pipeline/exec/scan_operator.h
index 958581b2f6f..7cb7b01a9a4 100644
--- a/be/src/pipeline/exec/scan_operator.h
+++ b/be/src/pipeline/exec/scan_operator.h
@@ -261,7 +261,7 @@ protected:
                                              SlotDescriptor* slot, 
ColumnValueRange<T>& range,
                                              
vectorized::VScanNode::PushDownType* pdt);
 
-    Status _normalize_compound_predicate(
+    void _normalize_compound_predicate(
             vectorized::VExpr* expr, vectorized::VExprContext* expr_ctx,
             vectorized::VScanNode::PushDownType* pdt, bool 
is_runtimer_filter_predicate,
             const std::function<bool(const vectorized::VExprSPtrs&,
diff --git a/be/src/pipeline/exec/schema_scan_operator.cpp 
b/be/src/pipeline/exec/schema_scan_operator.cpp
index 7b0b2f0ff5d..20eec02bc13 100644
--- a/be/src/pipeline/exec/schema_scan_operator.cpp
+++ b/be/src/pipeline/exec/schema_scan_operator.cpp
@@ -40,7 +40,7 @@ Status SchemaScanOperator::open(RuntimeState* state) {
 
 Status SchemaScanOperator::close(RuntimeState* state) {
     RETURN_IF_ERROR(SourceOperator::close(state));
-    static_cast<void>(_node->close(state));
+    RETURN_IF_ERROR(_node->close(state));
     return Status::OK();
 }
 
diff --git a/be/src/pipeline/exec/union_source_operator.cpp 
b/be/src/pipeline/exec/union_source_operator.cpp
index 9e15c2bd9f6..43b0fb2762b 100644
--- a/be/src/pipeline/exec/union_source_operator.cpp
+++ b/be/src/pipeline/exec/union_source_operator.cpp
@@ -71,7 +71,7 @@ Status UnionSourceOperator::pull_data(RuntimeState* state, 
vectorized::Block* bl
     } else {
         std::unique_ptr<vectorized::Block> output_block;
         int child_idx = 0;
-        static_cast<void>(_data_queue->get_block_from_queue(&output_block, 
&child_idx));
+        RETURN_IF_ERROR(_data_queue->get_block_from_queue(&output_block, 
&child_idx));
         if (!output_block) {
             return Status::OK();
         }
diff --git a/be/src/pipeline/exec/union_source_operator.h 
b/be/src/pipeline/exec/union_source_operator.h
index a70d55ab606..023e6363d48 100644
--- a/be/src/pipeline/exec/union_source_operator.h
+++ b/be/src/pipeline/exec/union_source_operator.h
@@ -118,7 +118,7 @@ public:
     }
 
     Status prepare(RuntimeState* state) override {
-        static_cast<void>(Base::prepare(state));
+        RETURN_IF_ERROR(Base::prepare(state));
         // Prepare const expr lists.
         for (const vectorized::VExprContextSPtrs& exprs : _const_expr_lists) {
             RETURN_IF_ERROR(vectorized::VExpr::prepare(exprs, state, 
_row_descriptor));
diff --git a/be/src/pipeline/pipeline.cpp b/be/src/pipeline/pipeline.cpp
index 9c9d7cd6099..8b2123bcce2 100644
--- a/be/src/pipeline/pipeline.cpp
+++ b/be/src/pipeline/pipeline.cpp
@@ -38,7 +38,7 @@ Status Pipeline::build_operators() {
     for (auto& operator_t : _operator_builders) {
         auto o = operator_t->build_operator();
         if (pre) {
-            static_cast<void>(o->set_child(pre));
+            RETURN_IF_ERROR(o->set_child(pre));
         }
         _operators.emplace_back(o);
 
diff --git a/be/src/pipeline/pipeline_fragment_context.cpp 
b/be/src/pipeline/pipeline_fragment_context.cpp
index 6a5bd87e199..d1800299559 100644
--- a/be/src/pipeline/pipeline_fragment_context.cpp
+++ b/be/src/pipeline/pipeline_fragment_context.cpp
@@ -325,7 +325,7 @@ Status PipelineFragmentContext::prepare(const 
doris::TPipelineFragmentParams& re
             auto* scan_node = static_cast<ScanNode*>(node);
             auto scan_ranges = 
find_with_default(local_params.per_node_scan_ranges, scan_node->id(),
                                                  no_scan_ranges);
-            static_cast<void>(scan_node->set_scan_ranges(_runtime_state.get(), 
scan_ranges));
+            RETURN_IF_ERROR(scan_node->set_scan_ranges(_runtime_state.get(), 
scan_ranges));
             VLOG_CRITICAL << "query " << print_id(get_query_id())
                           << " scan_node_id=" << scan_node->id()
                           << " size=" << scan_ranges.get().size();
@@ -642,7 +642,7 @@ Status PipelineFragmentContext::_build_pipelines(ExecNode* 
node, PipelinePtr cur
         } else {
             OperatorBuilderPtr builder = 
std::make_shared<EmptySourceOperatorBuilder>(
                     node->child(1)->id(), node->child(1)->row_desc(), 
node->child(1));
-            static_cast<void>(new_pipe->add_operator(builder));
+            RETURN_IF_ERROR(new_pipe->add_operator(builder));
         }
         OperatorBuilderPtr join_sink =
                 std::make_shared<HashJoinBuildSinkBuilder>(node->id(), 
join_node);
diff --git a/be/src/pipeline/pipeline_x/pipeline_x_fragment_context.cpp 
b/be/src/pipeline/pipeline_x/pipeline_x_fragment_context.cpp
index 1b717ec3c3b..01dc49d3667 100644
--- a/be/src/pipeline/pipeline_x/pipeline_x_fragment_context.cpp
+++ b/be/src/pipeline/pipeline_x/pipeline_x_fragment_context.cpp
@@ -432,7 +432,7 @@ Status 
PipelineXFragmentContext::_create_data_sink(ObjectPool* pool, const TData
             // 1. create and set the source operator of 
multi_cast_data_stream_source for new pipeline
             source_op.reset(new MultiCastDataStreamerSourceOperatorX(
                     i, pool, thrift_sink.multi_cast_stream_sink.sinks[i], 
row_desc, source_id));
-            static_cast<void>(new_pipeline->add_operator(source_op));
+            RETURN_IF_ERROR(new_pipeline->add_operator(source_op));
             // 2. create and set sink operator of data stream sender for new 
pipeline
 
             DataSinkOperatorXPtr sink_op;
@@ -441,7 +441,7 @@ Status 
PipelineXFragmentContext::_create_data_sink(ObjectPool* pool, const TData
                                               
thrift_sink.multi_cast_stream_sink.sinks[i],
                                               
thrift_sink.multi_cast_stream_sink.destinations[i]));
 
-            static_cast<void>(new_pipeline->set_sink(sink_op));
+            RETURN_IF_ERROR(new_pipeline->set_sink(sink_op));
             {
                 TDataSink* t = pool->add(new TDataSink());
                 t->stream_sink = thrift_sink.multi_cast_stream_sink.sinks[i];
diff --git a/be/src/pipeline/task_scheduler.cpp 
b/be/src/pipeline/task_scheduler.cpp
index 5032bdef6b7..e30a5b2d26b 100644
--- a/be/src/pipeline/task_scheduler.cpp
+++ b/be/src/pipeline/task_scheduler.cpp
@@ -206,12 +206,12 @@ TaskScheduler::~TaskScheduler() {
 Status TaskScheduler::start() {
     int cores = _task_queue->cores();
     // Must be mutil number of cpu cores
-    static_cast<void>(ThreadPoolBuilder(_name)
-                              .set_min_threads(cores)
-                              .set_max_threads(cores)
-                              .set_max_queue_size(0)
-                              .set_cgroup_cpu_ctl(_cgroup_cpu_ctl)
-                              .build(&_fix_thread_pool));
+    RETURN_IF_ERROR(ThreadPoolBuilder(_name)
+                            .set_min_threads(cores)
+                            .set_max_threads(cores)
+                            .set_max_queue_size(0)
+                            .set_cgroup_cpu_ctl(_cgroup_cpu_ctl)
+                            .build(&_fix_thread_pool));
     _markers.reserve(cores);
     for (size_t i = 0; i < cores; ++i) {
         _markers.push_back(std::make_unique<std::atomic<bool>>(true));
diff --git a/be/src/runtime/buffer_control_block.cpp 
b/be/src/runtime/buffer_control_block.cpp
index c2ec1831c7e..7513dc46e7b 100644
--- a/be/src/runtime/buffer_control_block.cpp
+++ b/be/src/runtime/buffer_control_block.cpp
@@ -96,7 +96,7 @@ BufferControlBlock::BufferControlBlock(const TUniqueId& id, 
int buffer_size)
 }
 
 BufferControlBlock::~BufferControlBlock() {
-    static_cast<void>(cancel());
+    cancel();
 }
 
 Status BufferControlBlock::init() {
@@ -257,7 +257,7 @@ Status BufferControlBlock::close(Status exec_status) {
     return Status::OK();
 }
 
-Status BufferControlBlock::cancel() {
+void BufferControlBlock::cancel() {
     std::unique_lock<std::mutex> l(_lock);
     _is_cancelled = true;
     _data_removal.notify_all();
@@ -266,7 +266,6 @@ Status BufferControlBlock::cancel() {
         ctx->on_failure(Status::Cancelled("Cancelled"));
     }
     _waiting_rpc.clear();
-    return Status::OK();
 }
 
 Status PipBufferControlBlock::add_batch(std::unique_ptr<TFetchDataResult>& 
result) {
@@ -292,11 +291,9 @@ Status 
PipBufferControlBlock::get_arrow_batch(std::shared_ptr<arrow::RecordBatch
     return Status::OK();
 }
 
-Status PipBufferControlBlock::cancel() {
-    RETURN_IF_ERROR(BufferControlBlock::cancel());
+void PipBufferControlBlock::cancel() {
+    BufferControlBlock::cancel();
     _update_dependency();
-
-    return Status::OK();
 }
 
 void PipBufferControlBlock::set_dependency(
diff --git a/be/src/runtime/buffer_control_block.h 
b/be/src/runtime/buffer_control_block.h
index b691b4640dd..33fd1eed724 100644
--- a/be/src/runtime/buffer_control_block.h
+++ b/be/src/runtime/buffer_control_block.h
@@ -87,7 +87,7 @@ public:
     // called because data has been read or error happened.
     Status close(Status exec_status);
     // this is called by RPC, called from coordinator
-    virtual Status cancel();
+    virtual void cancel();
 
     [[nodiscard]] const TUniqueId& fragment_id() const { return _fragment_id; }
 
@@ -152,7 +152,7 @@ public:
 
     Status get_arrow_batch(std::shared_ptr<arrow::RecordBatch>* result) 
override;
 
-    Status cancel() override;
+    void cancel() override;
 
     void set_dependency(std::shared_ptr<pipeline::Dependency> 
result_sink_dependency);
 
diff --git a/be/src/runtime/result_buffer_mgr.cpp 
b/be/src/runtime/result_buffer_mgr.cpp
index 04831fcefb2..3d96c1871b9 100644
--- a/be/src/runtime/result_buffer_mgr.cpp
+++ b/be/src/runtime/result_buffer_mgr.cpp
@@ -150,13 +150,13 @@ Status ResultBufferMgr::fetch_arrow_data(const TUniqueId& 
finst_id,
     return Status::OK();
 }
 
-Status ResultBufferMgr::cancel(const TUniqueId& query_id) {
+void ResultBufferMgr::cancel(const TUniqueId& query_id) {
     {
         std::unique_lock<std::shared_mutex> wlock(_buffer_map_lock);
         BufferMap::iterator iter = _buffer_map.find(query_id);
 
         if (_buffer_map.end() != iter) {
-            static_cast<void>(iter->second->cancel());
+            iter->second->cancel();
             _buffer_map.erase(iter);
         }
     }
@@ -169,8 +169,6 @@ Status ResultBufferMgr::cancel(const TUniqueId& query_id) {
             _arrow_schema_map.erase(arrow_schema_iter);
         }
     }
-
-    return Status::OK();
 }
 
 void ResultBufferMgr::cancel_at_time(time_t cancel_time, const TUniqueId& 
query_id) {
@@ -208,7 +206,7 @@ void ResultBufferMgr::cancel_thread() {
 
         // cancel query
         for (int i = 0; i < query_to_cancel.size(); ++i) {
-            static_cast<void>(cancel(query_to_cancel[i]));
+            cancel(query_to_cancel[i]);
         }
     } while 
(!_stop_background_threads_latch.wait_for(std::chrono::seconds(1)));
 
diff --git a/be/src/runtime/result_buffer_mgr.h 
b/be/src/runtime/result_buffer_mgr.h
index 06d13104205..e6ae0cc1042 100644
--- a/be/src/runtime/result_buffer_mgr.h
+++ b/be/src/runtime/result_buffer_mgr.h
@@ -71,7 +71,7 @@ public:
     std::shared_ptr<arrow::Schema> find_arrow_schema(const TUniqueId& 
query_id);
 
     // cancel
-    Status cancel(const TUniqueId& fragment_id);
+    void cancel(const TUniqueId& fragment_id);
 
     // cancel one query at a future time.
     void cancel_at_time(time_t cancel_time, const TUniqueId& query_id);
diff --git a/be/src/vec/exec/vaggregation_node.cpp 
b/be/src/vec/exec/vaggregation_node.cpp
index 5e0f9762668..d46aa3f5736 100644
--- a/be/src/vec/exec/vaggregation_node.cpp
+++ b/be/src/vec/exec/vaggregation_node.cpp
@@ -415,7 +415,7 @@ Status AggregationNode::alloc_resource(doris::RuntimeState* 
state) {
     // this could cause unable to get JVM
     if (_probe_expr_ctxs.empty()) {
         // _create_agg_status may acquire a lot of memory, may allocate failed 
when memory is very few
-        
RETURN_IF_CATCH_EXCEPTION(static_cast<void>(_create_agg_status(_agg_data->without_key)));
+        RETURN_IF_ERROR(_create_agg_status(_agg_data->without_key));
         _agg_data_created_without_key = true;
     }
 
@@ -509,7 +509,7 @@ Status AggregationNode::sink(doris::RuntimeState* state, 
vectorized::Block* in_b
     }
     if (eos) {
         if (_spill_context.has_data) {
-            static_cast<void>(_try_spill_disk(true));
+            RETURN_IF_ERROR(_try_spill_disk(true));
             RETURN_IF_ERROR(_spill_context.prepare_for_reading());
         }
         _can_read = true;
@@ -555,11 +555,10 @@ Status 
AggregationNode::_create_agg_status(AggregateDataPtr data) {
     return Status::OK();
 }
 
-Status AggregationNode::_destroy_agg_status(AggregateDataPtr data) {
+void AggregationNode::_destroy_agg_status(AggregateDataPtr data) {
     for (int i = 0; i < _aggregate_evaluators.size(); ++i) {
         _aggregate_evaluators[i]->function()->destroy(data + 
_offsets_of_aggregate_states[i]);
     }
-    return Status::OK();
 }
 
 Status AggregationNode::_get_without_key_result(RuntimeState* state, Block* 
block, bool* eos) {
@@ -698,7 +697,7 @@ void AggregationNode::_close_without_key() {
     //but finally call close to destory agg data, if agg data has bitmapValue
     //will be core dump, it's not initialized
     if (_agg_data_created_without_key) {
-        static_cast<void>(_destroy_agg_status(_agg_data->without_key));
+        _destroy_agg_status(_agg_data->without_key);
         _agg_data_created_without_key = false;
     }
     release_tracker();
@@ -795,7 +794,7 @@ Status AggregationNode::_reset_hash_table() {
 
                 hash_table.for_each_mapped([&](auto& mapped) {
                     if (mapped) {
-                        static_cast<void>(_destroy_agg_status(mapped));
+                        _destroy_agg_status(mapped);
                         mapped = nullptr;
                     }
                 });
@@ -1509,16 +1508,12 @@ void AggregationNode::_close_with_serialized_key() {
                 auto& data = *agg_method.hash_table;
                 data.for_each_mapped([&](auto& mapped) {
                     if (mapped) {
-                        static_cast<void>(_destroy_agg_status(mapped));
+                        _destroy_agg_status(mapped);
                         mapped = nullptr;
                     }
                 });
                 if (data.has_null_key_data()) {
-                    auto st = _destroy_agg_status(
-                            data.template 
get_null_key_data<AggregateDataPtr>());
-                    if (!st) {
-                        throw Exception(st.code(), st.to_string());
-                    }
+                    _destroy_agg_status(data.template 
get_null_key_data<AggregateDataPtr>());
                 }
             },
             _agg_data->method_variant);
diff --git a/be/src/vec/exec/vaggregation_node.h 
b/be/src/vec/exec/vaggregation_node.h
index bbb0ea62019..f74d035caef 100644
--- a/be/src/vec/exec/vaggregation_node.h
+++ b/be/src/vec/exec/vaggregation_node.h
@@ -505,7 +505,7 @@ private:
     size_t _get_hash_table_size();
 
     Status _create_agg_status(AggregateDataPtr data);
-    Status _destroy_agg_status(AggregateDataPtr data);
+    void _destroy_agg_status(AggregateDataPtr data);
 
     Status _get_without_key_result(RuntimeState* state, Block* block, bool* 
eos);
     Status _serialize_without_key(RuntimeState* state, Block* block, bool* 
eos);
diff --git a/be/test/http/http_client_test.cpp 
b/be/test/http/http_client_test.cpp
index ae56bd9712f..729a709fb93 100644
--- a/be/test/http/http_client_test.cpp
+++ b/be/test/http/http_client_test.cpp
@@ -99,7 +99,7 @@ public:
         s_server->register_handler(HEAD, "/simple_get", &s_simple_get_handler);
         s_server->register_handler(POST, "/simple_post", 
&s_simple_post_handler);
         s_server->register_handler(GET, "/not_found", &s_not_found_handler);
-        s_server->start();
+        static_cast<void>(s_server->start());
         real_port = s_server->get_real_port();
         EXPECT_NE(0, real_port);
         hostname = "http://127.0.0.1:" + std::to_string(real_port);


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to