This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 119867310c0fb1c5d0b5a31ed3361ff40b55d200
Author: Tamas Mate <[email protected]>
AuthorDate: Mon Jan 29 15:08:41 2024 +0100

    IMPALA-12764: Fix Iceberg metadata table scan's LIMIT clause
    
    Earlier the ReachedLimit() condition was not evaluated during
    IcebergMetadataScanNode::GetNext(). This missing condition has been
    added and LIMIT is now being evaluated.
    
    Testing:
     - Added E2E test
    
    Change-Id: Iea73716fe475c8b063235d2ae971a4074b8e2a20
    Reviewed-on: http://gerrit.cloudera.org:8080/20968
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 .../iceberg-metadata/iceberg-metadata-scan-node.cc  |  4 +++-
 .../queries/QueryTest/iceberg-metadata-tables.test  | 21 +++++++++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/be/src/exec/iceberg-metadata/iceberg-metadata-scan-node.cc 
b/be/src/exec/iceberg-metadata/iceberg-metadata-scan-node.cc
index 5210b7cd1..352371930 100644
--- a/be/src/exec/iceberg-metadata/iceberg-metadata-scan-node.cc
+++ b/be/src/exec/iceberg-metadata/iceberg-metadata-scan-node.cc
@@ -163,7 +163,7 @@ Status IcebergMetadataScanNode::GetNext(RuntimeState* 
state, RowBatch* row_batch
       &tuple_buffer));
   Tuple* tuple = reinterpret_cast<Tuple*>(tuple_buffer);
   tuple->Init(tuple_buffer_size);
-  while (!row_batch->AtCapacity()) {
+  while (!ReachedLimit() && !row_batch->AtCapacity()) {
     int row_idx = row_batch->AddRow();
     TupleRow* tuple_row = row_batch->GetRow(row_idx);
     tuple_row->SetTuple(0, tuple);
@@ -188,12 +188,14 @@ Status IcebergMetadataScanNode::GetNext(RuntimeState* 
state, RowBatch* row_batch
       row_batch->CommitLastRow();
       tuple = reinterpret_cast<Tuple*>(
           reinterpret_cast<uint8_t*>(tuple) + tuple_desc_->byte_size());
+      IncrementNumRowsReturned(1);
     } else {
+      // Reset the null bits, everything else will be overwritten
       Tuple::ClearNullBits(tuple, tuple_desc_->null_bytes_offset(),
           tuple_desc_->num_null_bytes());
     }
   }
+  if (ReachedLimit()) *eos = true;
   return Status::OK();
 }
 
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
index e678efe9a..35141225c 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
@@ -294,6 +294,27 @@ where snapshot_id = $OVERWRITE_SNAPSHOT_ID or snapshot_id 
= 1;
 row_regex:$OVERWRITE_SNAPSHOT_TS,$OVERWRITE_SNAPSHOT_ID,[1-9]\d*|0,true
 ---- TYPES
 TIMESTAMP,BIGINT,BIGINT,BOOLEAN
+====
+---- QUERY
+# Test LIMIT
+select snapshot_id from functional_parquet.iceberg_query_metadata.snapshots 
limit 2;
+---- RESULTS
+row_regex:[1-9]\d*|0
+row_regex:[1-9]\d*|0
+---- TYPES
+BIGINT
+====
+---- QUERY
+# Test LIMIT
+set BATCH_SIZE=1;
+select snapshot_id from functional_parquet.iceberg_query_metadata.snapshots 
limit 3;
+---- RESULTS
+row_regex:[1-9]\d*|0
+row_regex:[1-9]\d*|0
+row_regex:[1-9]\d*|0
+---- TYPES
+BIGINT
+====
 
 ####
 # Test 4 : Test joins

Reply via email to