gstvg commented on code in PR #12116:
URL: https://github.com/apache/datafusion/pull/12116#discussion_r1727606208
##########
datafusion/functions/src/core/union_extract.rs:
##########

@@ -0,0 +1,722 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::cmp::Ordering;
+use std::sync::Arc;
+
+use arrow::array::{
+    layout, make_array, new_empty_array, new_null_array, Array, ArrayRef, BooleanArray,
+    Int32Array, Scalar, UnionArray,
+};
+use arrow::compute::take;
+use arrow::datatypes::{DataType, FieldRef, UnionFields, UnionMode};
+
+use arrow::buffer::{BooleanBuffer, MutableBuffer, NullBuffer, ScalarBuffer};
+use arrow::util::bit_util;
+use datafusion_common::cast::as_union_array;
+use datafusion_common::{
+    exec_datafusion_err, exec_err, internal_err, ExprSchema, Result, ScalarValue,
+};
+use datafusion_expr::{ColumnarValue, Expr};
+use datafusion_expr::{ScalarUDFImpl, Signature, Volatility};
+
+#[derive(Debug)]
+pub struct UnionExtractFun {
+    signature: Signature,
+}
+
+impl Default for UnionExtractFun {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl UnionExtractFun {
+    pub fn new() -> Self {
+        Self {
+            signature: Signature::any(2, Volatility::Immutable),
+        }
+    }
+}
+
+impl ScalarUDFImpl for UnionExtractFun {
+    fn as_any(&self) -> &dyn std::any::Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "union_extract"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, _: &[DataType]) -> Result<DataType> {
+        // should be using return_type_from_exprs and not calling the default implementation
+        internal_err!("union_extract should return type from exprs")
+    }
+
+    fn return_type_from_exprs(
+        &self,
+        args: &[Expr],
+        _: &dyn ExprSchema,
+        arg_types: &[DataType],
+    ) -> Result<DataType> {
+        if args.len() != 2 {
+            return exec_err!(
+                "union_extract expects 2 arguments, got {} instead",
+                args.len()
+            );
+        }
+
+        let fields = if let DataType::Union(fields, _) = &arg_types[0] {
+            fields
+        } else {
+            return exec_err!(
+                "union_extract first argument must be a union, got {} instead",
+                arg_types[0]
+            );
+        };
+
+        let field_name = if let Expr::Literal(ScalarValue::Utf8(Some(field_name))) =
+            &args[1]
+        {
+            field_name
+        } else {
+            return exec_err!(
+                "union_extract second argument must be a non-null string literal, got {} instead",
+                arg_types[1]
+            );
+        };
+
+        let field = find_field(fields, field_name)?.1;
+
+        Ok(field.data_type().clone())
+    }
+
+    fn invoke(&self, args: &[ColumnarValue]) -> Result<ColumnarValue> {
+        if args.len() != 2 {
+            return exec_err!(
+                "union_extract expects 2 arguments, got {} instead",
+                args.len()
+            );
+        }
+
+        let union = &args[0];
+
+        let target_name = match &args[1] {
+            ColumnarValue::Scalar(ScalarValue::Utf8(Some(target_name))) => Ok(target_name),
+            ColumnarValue::Scalar(ScalarValue::Utf8(None)) => exec_err!("union_extract second argument must be a non-null string literal, got a null instead"),
+            _ => exec_err!("union_extract second argument must be a non-null string literal, got {} instead", &args[1].data_type()),
+        };
+
+        match union {
+            ColumnarValue::Array(array) => {
+                let union_array = as_union_array(&array).map_err(|_| {
+                    exec_datafusion_err!(
+                        "union_extract first argument must be a union, got {} instead",
+                        array.data_type()
+                    )
+                })?;
+
+                let (fields, mode) = match union_array.data_type() {
+                    DataType::Union(fields, mode) => (fields, mode),
+                    _ => unreachable!(),
+                };
+
+                let target_type_id = find_field(fields, target_name?)?.0;
+
+                match mode {
+                    UnionMode::Sparse => {
+                        Ok(extract_sparse(union_array, fields, target_type_id)?)
+                    }
+                    UnionMode::Dense => {
+                        Ok(extract_dense(union_array, fields, target_type_id)?)
+                    }
+                }
+            }
+            ColumnarValue::Scalar(ScalarValue::Union(value, fields, _)) => {
+                let target_name = target_name?;
+                let (target_type_id, target) = find_field(fields, target_name)?;
+
+                let result = match value {
+                    Some((type_id, value)) if target_type_id == *type_id => {
+                        *value.clone()
+                    }
+                    _ => ScalarValue::try_from(target.data_type())?,
+                };
+
+                Ok(ColumnarValue::Scalar(result))
+            }
+            other => exec_err!(
+                "union_extract first argument must be a union, got {} instead",
+                other.data_type()
+            ),
+        }
+    }
+}
+
+fn find_field<'a>(fields: &'a UnionFields, name: &str) -> Result<(i8, &'a FieldRef)> {
+    fields
+        .iter()
+        .find(|field| field.1.name() == name)
+        .ok_or_else(|| exec_datafusion_err!("field {name} not found on union"))
+}
+
+fn extract_sparse(
+    union_array: &UnionArray,
+    fields: &UnionFields,
+    target_type_id: i8,
+) -> Result<ColumnarValue> {
+    let target = union_array.child(target_type_id);
+
+    if fields.len() == 1 // case 1.1: if there is a single field, all type ids are the same, and since unions don't have a null mask, the result array is exactly the same as its only child
+        || union_array.is_empty() // case 1.2: a sparse union's length and its children's lengths must match; if the union is empty, so is every child
+        || target.null_count() == target.len() || target.data_type().is_null()
+    // case 1.3: if all values of the target child are null, regardless of the selected type ids, the result will also be completely null
+    {
+        Ok(ColumnarValue::Array(Arc::clone(target)))
+    } else {
+        match eq_scalar(union_array.type_ids(), target_type_id) {
+            // case 2: all type ids equal our target, and since unions don't have a null mask, the result array is exactly the same as our target
+            BoolValue::Scalar(true) => Ok(ColumnarValue::Array(Arc::clone(target))),
+            // case 3: no type_id matches our target, the result is a null array
+            BoolValue::Scalar(false) => {
+                if layout(target.data_type()).can_contain_null_mask {
+                    // case 3.1: target array can contain a null mask
+                    // SAFETY: The only change to the array data is the addition of a null mask, and whether the target data type can contain a null mask was just checked above
+                    let data = unsafe {
+                        target
+                            .into_data()
+                            .into_builder()
+                            .nulls(Some(NullBuffer::new_null(target.len())))
+                            .build_unchecked()
+                    };
+
+                    Ok(ColumnarValue::Array(make_array(data)))
+                } else {
+                    // case 3.2: target can't contain a null mask
+                    Ok(new_null_columnar_value(target.data_type(), target.len()))
+                }
+            }
+            // case 4: some but not all type_ids match our target
+            BoolValue::Buffer(selected) => {
+                if layout(target.data_type()).can_contain_null_mask {
+                    // case 4.1: target array can contain a null mask
+                    let nulls = match target.nulls().filter(|n| n.null_count() > 0) {
+                        // case 4.1.1: our target child has nulls and types other than our target are selected, union the masks
+                        // the case where n.null_count() == n.len() is cheaply handled at case 1.3
+                        Some(nulls) => &selected & nulls.inner(),
+                        // case 4.1.2: target child has no nulls, but types other than our target are selected, use the selected mask as a null mask
+                        None => selected,
+                    };
+
+                    // SAFETY: The only change to the array data is the addition of a null mask, and whether the target data type can contain a null mask was just checked above
+                    let data = unsafe {
+                        assert_eq!(nulls.len(), target.len());
+
+                        target
+                            .into_data()
+                            .into_builder()
+                            .nulls(Some(nulls.into()))
+                            .build_unchecked()
+                    };
+
+                    Ok(ColumnarValue::Array(make_array(data)))
+                } else {
+                    // case 4.2: target can't contain a null mask, zip the values that match with a null value
+                    Ok(ColumnarValue::Array(arrow::compute::kernels::zip::zip(
+                        &BooleanArray::new(selected, None),
+                        target,
+                        &Scalar::new(new_null_array(target.data_type(), 1)),
+                    )?))
+                }
+            }
+        }
+    }
+}
+
+fn extract_dense(
+    union_array: &UnionArray,
+    fields: &UnionFields,
+    target_type_id: i8,
+) -> Result<ColumnarValue> {
+    let target = union_array.child(target_type_id);
+    let offsets = union_array.offsets().unwrap();
+
+    if union_array.is_empty() {
+        // case 1: the union is empty
+        if target.is_empty() {
+            // case 1.1: the target is also empty, do a cheap Arc::clone instead of allocating a new empty array
+            Ok(ColumnarValue::Array(Arc::clone(target)))
+        } else {
+            // case 1.2: the target is not empty, allocate a new empty array
+            Ok(ColumnarValue::Array(new_empty_array(target.data_type())))
+        }
+    } else if target.is_empty() {
+        // case 2: the union is not empty but the target is, which implies that no type_id points to it. The result is a null array
+        Ok(new_null_columnar_value(
+            target.data_type(),
+            union_array.len(),
+        ))
+    } else if target.null_count() == target.len() || target.data_type().is_null() {
+        // case 3: since all values on our target are null, regardless of the selected type ids and offsets, the result is a null array
+        match target.len().cmp(&union_array.len()) {
+            // case 3.1: since the target is smaller than the union, allocate a new correctly sized null array
+            Ordering::Less => Ok(new_null_columnar_value(
+                target.data_type(),
+                union_array.len(),
+            )),
+            // case 3.2: target length equals the union length, return it directly
+            Ordering::Equal => Ok(ColumnarValue::Array(Arc::clone(target))),
+            // case 3.3: target length is bigger than the union length, slice it
+            Ordering::Greater => {
+                Ok(ColumnarValue::Array(target.slice(0, union_array.len())))
+            }
+        }
+    } else if fields.len() == 1 // case A: since there's a single field, our target, every type id must match our target
+        || fields
+            .iter()
+            .filter(|(field_type_id, _)| *field_type_id != target_type_id)
+            .all(|(sibling_type_id, _)| union_array.child(sibling_type_id).is_empty())
+    // case B: since the siblings are empty, every type id must match our target
+    {
+        // case 4: every type id matches our target
+        Ok(ColumnarValue::Array(extract_dense_all_selected(
+            union_array,
+            target,
+            offsets,
+        )?))
+    } else {
+        match eq_scalar(union_array.type_ids(), target_type_id) {
+            // case 4C: all type ids match our target.
+            // A non-empty sibling without any selected value may happen after slicing the parent union,
+            // since only type_ids and offsets are sliced, not the children
+            BoolValue::Scalar(true) => Ok(ColumnarValue::Array(
+                extract_dense_all_selected(union_array, target, offsets)?,
+            )),
+            BoolValue::Scalar(false) => {
+                // case 5: no type_id matches our target, so the result array will be completely null
+                // A non-empty target without any selected value may happen after slicing the parent union,
+                // since only type_ids and offsets are sliced, not the children
+                match (target.len().cmp(&union_array.len()), layout(target.data_type()).can_contain_null_mask) {
+                    (Ordering::Less, _) // case 5.1A: our target is smaller than the parent union, allocate a new correctly sized null array
+                    | (_, false) => { // case 5.1B: target array can't contain a null mask
+                        Ok(new_null_columnar_value(target.data_type(), union_array.len()))
+                    }
+                    // case 5.2: target and parent union lengths are equal, and the target can contain a null mask, set it to an all-null null buffer
+                    (Ordering::Equal, true) => {
+                        // SAFETY: The only change to the array data is the addition of a null mask, and whether the target data type can contain a null mask was just checked above
+                        let data = unsafe {
+                            target
+                                .into_data()
+                                .into_builder()
+                                .nulls(Some(NullBuffer::new_null(union_array.len())))
+                                .build_unchecked()
+                        };
+
+                        Ok(ColumnarValue::Array(make_array(data)))
+                    }
+                    // case 5.3: target is bigger than its parent union and can contain a null mask, slice it and set its nulls to an all-null null buffer
+                    (Ordering::Greater, true) => {
+                        // SAFETY: The only change to the array data is the addition of a null mask, and whether the target data type can contain a null mask was just checked above
+                        let data = unsafe {
+                            target
+                                .into_data()
+                                .slice(0, union_array.len())
+                                .into_builder()
+                                .nulls(Some(NullBuffer::new_null(union_array.len())))
+                                .build_unchecked()
+                        };
+
+                        Ok(ColumnarValue::Array(make_array(data)))
+                    }
+                }
+            }
+            BoolValue::Buffer(selected) => {
+                // case 6: some type_ids match our target, but not all. For selected values, take the value pointed to by the offset. For unselected ones, take a valid null
+                Ok(ColumnarValue::Array(take(
+                    target,
+                    &Int32Array::new(offsets.clone(), Some(selected.into())),
+                    None,
+                )?))
+            }
+        }
+    }
+}
+
+fn extract_dense_all_selected(
+    union_array: &UnionArray,
+    target: &Arc<dyn Array>,
+    offsets: &ScalarBuffer<i32>,
+) -> Result<ArrayRef> {
+    let sequential =
+        target.len() - offsets[0] as usize >= union_array.len() && is_sequential(offsets);
+
+    if sequential && target.len() == union_array.len() {
+        // case 1: all offsets are sequential and both lengths match, return the array directly
+        Ok(Arc::clone(target))
+    } else if sequential && target.len() > union_array.len() {
+        // case 2: all offsets are sequential, but our target is bigger than our union, slice it, starting at the first offset
+        Ok(target.slice(offsets[0] as usize, union_array.len()))
+    } else {
+        // case 3: since the offsets are not sequential, take them from the child into a new sequential and correctly sized array
+        let indices = Int32Array::try_new(offsets.clone(), None)?;
+
+        Ok(take(target, &indices, None)?)
+    }
+}
+
+const EQ_SCALAR_CHUNK_SIZE: usize = 512;
+

Review Comment:
   Those `pub` items with `#[doc(hidden)]` are there to be benchmarked, in case any reviewer wants to, but maybe they can be removed before merge. Just let me know.
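   For readers following the cases above, a minimal sketch of what the scalar path of `invoke` computes end to end. The module path `datafusion_functions::core::union_extract`, the field names, and the `main`-style setup are illustrative assumptions, not part of this PR:

   ```rust
   use arrow::datatypes::{DataType, Field, UnionFields, UnionMode};
   use datafusion_common::{Result, ScalarValue};
   use datafusion_expr::{ColumnarValue, ScalarUDFImpl};
   // Assumed re-export path for the UDF added by this PR.
   use datafusion_functions::core::union_extract::UnionExtractFun;

   fn main() -> Result<()> {
       // A union with two fields: type id 0 => "int" (Int32), type id 1 => "str" (Utf8).
       let fields = UnionFields::new(
           vec![0, 1],
           vec![
               Field::new("int", DataType::Int32, false),
               Field::new("str", DataType::Utf8, false),
           ],
       );

       // A scalar union value currently holding the "str" variant.
       let value = ScalarValue::Union(
           Some((1, Box::new(ScalarValue::from("hello")))),
           fields,
           UnionMode::Dense,
       );

       let udf = UnionExtractFun::new();

       // Extracting the active field yields its value ("hello")...
       let active = udf.invoke(&[
           ColumnarValue::Scalar(value.clone()),
           ColumnarValue::Scalar(ScalarValue::from("str")),
       ])?;

       // ...while extracting the inactive field yields a typed NULL (Int32 NULL).
       let inactive = udf.invoke(&[
           ColumnarValue::Scalar(value),
           ColumnarValue::Scalar(ScalarValue::from("int")),
       ])?;

       println!("{active:?} / {inactive:?}");
       Ok(())
   }
   ```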
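   And since the comment is about keeping `#[doc(hidden)]` pub items around for benchmarking: if they are removed, the kernels can still be exercised indirectly through the public `invoke` path. A hedged criterion sketch follows; the module path, bench name, field names, and array setup are assumptions, and the hidden items themselves (whose definitions fall outside this hunk) are not referenced:

   ```rust
   use std::sync::Arc;

   use arrow::array::{ArrayRef, Int32Array, StringArray, UnionArray};
   use arrow::buffer::ScalarBuffer;
   use arrow::datatypes::{DataType, Field, UnionFields};
   use criterion::{criterion_group, criterion_main, Criterion};
   use datafusion_common::ScalarValue;
   use datafusion_expr::{ColumnarValue, ScalarUDFImpl};
   // Assumed re-export path for the UDF added by this PR.
   use datafusion_functions::core::union_extract::UnionExtractFun;

   fn union_extract_sparse(c: &mut Criterion) {
       let len = 8192usize;

       let fields = UnionFields::new(
           vec![0, 1],
           vec![
               Field::new("int", DataType::Int32, false),
               Field::new("str", DataType::Utf8, false),
           ],
       );

       // Alternating type ids so neither the all-selected nor the none-selected fast path applies.
       let type_ids = ScalarBuffer::from_iter((0..len).map(|i| (i % 2) as i8));
       let ints: ArrayRef = Arc::new(Int32Array::from_iter_values(0..len as i32));
       let strs: ArrayRef =
           Arc::new(StringArray::from_iter_values((0..len).map(|i| i.to_string())));

       // Sparse union: no offsets, every child has the same length as the union.
       let union = UnionArray::try_new(fields, type_ids, None, vec![ints, strs]).unwrap();

       let udf = UnionExtractFun::new();
       let args = [
           ColumnarValue::Array(Arc::new(union)),
           ColumnarValue::Scalar(ScalarValue::from("str")),
       ];

       c.bench_function("union_extract sparse mixed", |b| {
           b.iter(|| udf.invoke(&args).unwrap())
       });
   }

   criterion_group!(benches, union_extract_sparse);
   criterion_main!(benches);
   ```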