Dandandan commented on code in PR #12269:
URL: https://github.com/apache/datafusion/pull/12269#discussion_r1740912268


##########
datafusion/physical-plan/src/aggregates/group_values/row_like.rs:
##########
@@ -0,0 +1,392 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::aggregates::group_values::GroupValues;
+use ahash::RandomState;
+use arrow::compute::cast;
+use arrow::datatypes::{
+    Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, UInt16Type,
+    UInt32Type, UInt64Type, UInt8Type,
+};
+use arrow::record_batch::RecordBatch;
+use arrow::row::{RowConverter, Rows, SortField};
+use arrow_array::{Array, ArrayRef};
+use arrow_schema::{DataType, SchemaRef};
+use datafusion_common::hash_utils::create_hashes;
+use datafusion_common::{internal_err, DataFusionError, Result};
+use datafusion_execution::memory_pool::proxy::{RawTableAllocExt, VecAllocExt};
+use datafusion_expr::EmitTo;
+use datafusion_physical_expr::binary_map::OutputType;
+use datafusion_physical_expr_common::group_value_row::{
+    ArrayEq, ByteGroupValueBuilderNaive, PrimitiveGroupValueBuilder,
+};
+use hashbrown::raw::RawTable;
+
+/// A [`GroupValues`] making use of [`Rows`]
+pub struct GroupValuesRowLike {
+    /// The output schema
+    schema: SchemaRef,
+
+    /// Converter for the group values
+    row_converter: RowConverter,
+
+    /// Logically maps group values to a group_index in
+    /// [`Self::group_values`] and in each accumulator
+    ///
+    /// Uses the raw API of hashbrown to avoid actually storing the
+    /// keys (group values) in the table
+    ///
+    /// keys: u64 hashes of the GroupValue
+    /// values: (hash, group_index)
+    map: RawTable<(u64, usize)>,
+
+    /// The size of `map` in bytes
+    map_size: usize,
+
+    /// The actual group by values, stored in arrow [`Row`] format.
+    /// `group_values[i]` holds the group value for group_index `i`.
+    ///
+    /// The row format is used to compare group keys quickly and store
+    /// them efficiently in memory. Quick comparison is especially
+    /// important for multi-column group keys.
+    ///
+    /// [`Row`]: arrow::row::Row
+    // group_values: Option<Rows>,
+
+    /// reused buffer to store hashes
+    hashes_buffer: Vec<u64>,
+
+    /// reused buffer to store rows
+    rows_buffer: Rows,
+
+    /// Random state for creating hashes
+    random_state: RandomState,
+
+    /// The actual group by values, stored column-wise: one builder per
+    /// group column, indexed by group_index. `None` until first use.
+    group_values_v2: Option<Vec<Box<dyn ArrayEq>>>,
+}
+
+impl GroupValuesRowLike {
+    pub fn try_new(schema: SchemaRef) -> Result<Self> {
+        let row_converter = RowConverter::new(
+            schema
+                .fields()
+                .iter()
+                .map(|f| SortField::new(f.data_type().clone()))
+                .collect(),
+        )?;
+
+        let map = RawTable::with_capacity(0);
+
+        let starting_rows_capacity = 1000;
+        let starting_data_capacity = 64 * starting_rows_capacity;
+        let rows_buffer =
+            row_converter.empty_rows(starting_rows_capacity, starting_data_capacity);
+        Ok(Self {
+            schema,
+            row_converter,
+            map,
+            map_size: 0,
+            // group_values: None,
+            group_values_v2: None,
+            hashes_buffer: Default::default(),
+            rows_buffer,
+            random_state: Default::default(),
+        })
+    }
+}
+
+impl GroupValues for GroupValuesRowLike {
+    fn intern(&mut self, cols: &[ArrayRef], groups: &mut Vec<usize>) -> Result<()> {
+        // Convert the group keys into the row format
+        // let group_rows = &mut self.rows_buffer;
+        // group_rows.clear();
+        // self.row_converter.append(group_rows, cols)?;
+        // let n_rows = group_rows.num_rows();
+
+        // let mut group_values = match self.group_values.take() {
+        //     Some(group_values) => group_values,
+        //     None => self.row_converter.empty_rows(0, 0),
+        // };
+
+        let n_rows = cols[0].len();
+        let mut group_values_v2 = match self.group_values_v2.take() {
+            Some(group_values) => group_values,
+            None => {
+                let len = cols.len();
+                let mut v = Vec::with_capacity(len);
+                // Move to `try_new`
+                for (i, f) in self.schema.fields().iter().enumerate() {
+                    match f.data_type() {
+                        &DataType::Int8 => {
+                            let b = PrimitiveGroupValueBuilder::<Int8Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Int16 => {
+                            let b = PrimitiveGroupValueBuilder::<Int16Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Int32 => {
+                            let b = PrimitiveGroupValueBuilder::<Int32Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Int64 => {
+                            let b = PrimitiveGroupValueBuilder::<Int64Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::UInt8 => {
+                            let b = PrimitiveGroupValueBuilder::<UInt8Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::UInt16 => {
+                            let b = PrimitiveGroupValueBuilder::<UInt16Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::UInt32 => {
+                            let b = PrimitiveGroupValueBuilder::<UInt32Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::UInt64 => {
+                            let b = PrimitiveGroupValueBuilder::<UInt64Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Float32 => {
+                            let b = PrimitiveGroupValueBuilder::<Float32Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Float64 => {
+                            let b = PrimitiveGroupValueBuilder::<Float64Type>::new();
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::Utf8 => {
+                            let b =
+                                ByteGroupValueBuilderNaive::<i32>::new(OutputType::Utf8);
+                            v.push(Box::new(b) as _)
+                        }
+                        &DataType::LargeUtf8 => {
+                            let b =
+                                ByteGroupValueBuilderNaive::<i64>::new(OutputType::Utf8);
+                            v.push(Box::new(b) as _)
+                        }
+                        dt => todo!("{dt} not impl"),
+                    }
+                }
+                v
+            }
+        };
+
+        // tracks to which group each of the input rows belongs
+        groups.clear();
+
+        // 1.1 Calculate the group keys for the group values
+        let batch_hashes = &mut self.hashes_buffer;
+        batch_hashes.clear();
+        batch_hashes.resize(n_rows, 0);
+        create_hashes(cols, &self.random_state, batch_hashes)?;
+
+        for (row, &target_hash) in batch_hashes.iter().enumerate() {
+            let entry = self.map.get_mut(target_hash, |(exist_hash, group_idx)| {
+                // Somewhat surprisingly, this closure can be called even if the
+                // hash doesn't match, so check the hash with a cheap integer
+                // comparison first to avoid the more expensive comparison with
+                // the group value. https://github.com/apache/datafusion/pull/11718
+                if target_hash != *exist_hash {
+                    return false;
+                }
+                // verify that the group we are inserting with this hash is
+                // actually the same key value as the group at
+                // existing_idx (aka group_values @ row)
+                // && group_rows.row(row) == group_values.row(*group_idx)
+
+                fn compare_equal(
+                    array_eq: &dyn ArrayEq,
+                    lhs_row: usize,
+                    array: &ArrayRef,
+                    rhs_row: usize,
+                ) -> bool {
+                    array_eq.equal_to(lhs_row, array, rhs_row)
+                }
+
+                for (i, group_val) in group_values_v2.iter().enumerate() {
+                    if !compare_equal(group_val.as_ref(), *group_idx, &cols[i], row) {

Review Comment:
   As this is called in a loop, it can be optimized/specialized for certain cases, e.g. whether the arrays contain any nulls or not.
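
   A minimal sketch of what that specialization could look like, purely illustrative and not the PR's actual `ArrayEq` / `PrimitiveGroupValueBuilder` implementation: branch once per column on `null_count()`, so the null-free fast path never does per-row validity checks. The struct, field names, and helper below are assumptions made for the example.

```rust
// Illustrative sketch only: a hypothetical primitive-column builder that
// specializes its equality check on whether the probe array has nulls.
// Types and field names are assumptions, not the PR's API.
use arrow_array::cast::AsArray;
use arrow_array::types::ArrowPrimitiveType;
use arrow_array::{Array, ArrayRef};

struct PrimitiveColumn<T: ArrowPrimitiveType> {
    /// Group value for each group index
    values: Vec<T::Native>,
    /// Validity for each group index (true = non-null)
    valids: Vec<bool>,
}

impl<T: ArrowPrimitiveType> PrimitiveColumn<T>
where
    T::Native: PartialEq,
{
    /// Compare the stored group value at `lhs_row` with `array[rhs_row]`.
    /// The `null_count()` check hoists the null-handling decision out of
    /// the per-row hot path when the input column has no nulls.
    /// (NaN / total-order concerns for floats are ignored for brevity.)
    fn equal_to(&self, lhs_row: usize, array: &ArrayRef, rhs_row: usize) -> bool {
        // Panics if the column is not of type T; callers dispatch on the schema.
        let array = array.as_primitive::<T>();
        if array.null_count() == 0 {
            // Fast path: probe side is guaranteed non-null
            self.valids[lhs_row] && self.values[lhs_row] == array.value(rhs_row)
        } else {
            // General path: null-aware comparison
            match (self.valids[lhs_row], array.is_valid(rhs_row)) {
                (true, true) => self.values[lhs_row] == array.value(rhs_row),
                (false, false) => true,
                _ => false,
            }
        }
    }
}
```

   In the PR this logic would presumably sit behind the `ArrayEq` trait; the point is only that the null/no-null decision can be made once from `null_count()` rather than per element.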


