comphead commented on code in PR #17562:
URL: https://github.com/apache/datafusion/pull/17562#discussion_r2349448414
##########
datafusion/physical-plan/src/joins/nested_loop_join.rs:
##########
@@ -1232,11 +1235,49 @@ impl NestedLoopJoinStream {
// and push the result into output_buffer
// ========
+ // Special case:
+ // When the right batch is very small, join with multiple left rows at
once,
+ //
+ // The regular implementation is not efficient if the plan's right
child is
+ // very small (e.g. 1 row total), because inside the inner loop of
NLJ, it's
+ // handling one input right batch at once, if it's not large enough,
the
+ overheads like filter evaluation can't be amortized through
vectorization.
+ debug_assert_ne!(
+ right_batch.num_rows(),
+ 0,
+ "When fetching the right batch, empty batches will be skipped"
+ );
+ if (self.batch_size / right_batch.num_rows()) > 10 {
+ // Calculate max left rows to handle at once. This operator tries
to handle
+ // up to `datafusion.execution.batch_size` rows at once in the
intermediate
+ // batch.
+ let l_row_count = self.batch_size / right_batch.num_rows();
+ let l_row_count = std::cmp::min(
+ l_row_count,
+ left_data.batch().num_rows() - self.left_probe_idx,
+ );
Review Comment:
```suggestion
let l_row_cnt_ratio = self.batch_size / right_batch.num_rows();
        if l_row_cnt_ratio > 10 {
// Calculate max left rows to handle at once. This operator
tries to handle
// up to `datafusion.execution.batch_size` rows at once in the
intermediate
// batch.
let l_row_count = std::cmp::min(
l_row_cnt_ratio,
left_data.batch().num_rows() - self.left_probe_idx,
);
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]