b4l commented on code in PR #471:
URL: https://github.com/apache/sedona-db/pull/471#discussion_r2695461419

##########
rust/sedona-pointcloud/src/laz/opener.rs:
##########
@@ -0,0 +1,130 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use datafusion_common::{error::DataFusionError, pruning::PrunableStatistics};
+use datafusion_datasource::{
+    file_meta::FileMeta,
+    file_stream::{FileOpenFuture, FileOpener},
+    PartitionedFile,
+};
+use datafusion_physical_expr::PhysicalExpr;
+use datafusion_pruning::PruningPredicate;
+use futures::StreamExt;
+
+use crate::laz::{
+    options::LazTableOptions,
+    reader::{LazFileReader, LazFileReaderFactory},
+    schema::schema_from_header,
+};
+
+pub struct LazOpener {
+    /// Column indexes in `table_schema` needed by the query
+    pub projection: Arc<[usize]>,
+    /// Optional limit on the number of rows to read
+    pub limit: Option<usize>,
+    pub predicate: Option<Arc<dyn PhysicalExpr>>,
+    /// Factory for instantiating laz reader
+    pub laz_file_reader_factory: Arc<LazFileReaderFactory>,
+    /// Table options
+    pub options: LazTableOptions,
+}
+
+impl FileOpener for LazOpener {
+    fn open(
+        &self,
+        _file_meta: FileMeta,
+        file: PartitionedFile,
+    ) -> Result<FileOpenFuture, DataFusionError> {
+        let projection = self.projection.clone();
+        let limit = self.limit;
+
+        let predicate = self.predicate.clone();
+
+        let laz_reader: Box<LazFileReader> = self
+            .laz_file_reader_factory
+            .create_reader(file.clone(), self.options.clone())?;
+
+        Ok(Box::pin(async move {
+            let metadata = laz_reader.get_metadata().await?;
+            let schema = Arc::new(schema_from_header(
+                &metadata.header,
+                laz_reader.options.point_encoding,
+                laz_reader.options.extra_bytes,
+            ));
+
+            let pruning_predicate = predicate.and_then(|physical_expr| {
+                PruningPredicate::try_new(physical_expr, schema.clone()).ok()
+            });
+
+            // file pruning
+            if let Some(pruning_predicate) = &pruning_predicate {
+                if let Some(statistics) = file.statistics {
+                    let prunable_statistics = PrunableStatistics::new(vec![statistics], schema);
+                    if let Ok(filter) = pruning_predicate.prune(&prunable_statistics) {
+                        if !filter[0] {
+                            return Ok(futures::stream::empty().boxed());
+                        }
+                    }
+                }
+            }
+
+            // map chunk table
+            let chunk_table: Vec<_> = metadata
+                .chunk_table
+                .iter()
+                .filter(|chunk_meta| {
+                    file.range.as_ref().is_none_or(|range| {
+                        let offset = chunk_meta.byte_range.start;
+                        offset >= range.start as u64 && offset < range.end as u64
+                    })
+                })
+                .cloned()
+                .collect();
+
+            let mut row_count = 0;
+
+            let stream = async_stream::try_stream! {
+                for chunk_meta in chunk_table.into_iter() {
+                    // limit
+                    if let Some(limit) = limit {
+                        if row_count >= limit {
+                            break;
+                        }
+                    }
+
+                    // fetch batch
+                    let record_batch = laz_reader.get_batch(&chunk_meta).await?;

Review Comment:
Hmm, not sure this applies here.
This is already part of a partitioned stream, so all cores should be busy. I am also quite skeptical that data stays in L2 cache across operators in DataFusion in general, even when the batch size is right, since there is no explicit operator fusion and execution is driven by tokio. I might be wrong here, though; it's just a hunch.

Regarding byte-range partitioning: my experience with point clouds is that randomly splitting the points across as many files as there are cores gives a noticeable performance boost, because every partition then contributes the same amount to the result set. Short of that, round-robin assignment is superior to byte ranges, since the points almost always have some order (even if it is just scanlines), so for selective queries that would otherwise land in a single byte range, the workload and the result set are distributed more evenly across partitions.
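To make the round-robin vs. byte-range point a bit more concrete, here is a small standalone sketch (not code from this PR; `ChunkMeta`, `chunks_for_byte_range`, and `chunks_for_round_robin` are hypothetical stand-ins) of how the two strategies split an ordered chunk table across partitions:

```rust
use std::ops::Range;

/// Hypothetical stand-in for a LAZ chunk-table entry (not the PR's type).
#[derive(Clone)]
struct ChunkMeta {
    byte_range: Range<u64>,
}

/// Byte-range partitioning: a partition keeps every chunk whose start offset
/// falls inside its assigned byte range (mirroring the filter in `open()`).
fn chunks_for_byte_range(chunks: &[ChunkMeta], range: &Range<u64>) -> Vec<ChunkMeta> {
    chunks
        .iter()
        .filter(|c| c.byte_range.start >= range.start && c.byte_range.start < range.end)
        .cloned()
        .collect()
}

/// Round-robin partitioning: chunk `i` goes to partition `i % partitions`,
/// so any ordering in the file (e.g. scanlines) is spread across all partitions.
fn chunks_for_round_robin(
    chunks: &[ChunkMeta],
    partition: usize,
    partitions: usize,
) -> Vec<ChunkMeta> {
    chunks
        .iter()
        .enumerate()
        .filter(|(i, _)| i % partitions == partition)
        .map(|(_, c)| c.clone())
        .collect()
}

fn main() {
    // Eight equally sized chunks laid out in file order.
    let chunks: Vec<ChunkMeta> = (0..8u64)
        .map(|i| ChunkMeta {
            byte_range: (i * 1000)..((i + 1) * 1000),
        })
        .collect();

    // Byte ranges: partition 0 owns the first half of the file, so a selective
    // query touching only that region is served by a single partition.
    let by_range = chunks_for_byte_range(&chunks, &(0..4000));

    // Round robin: partition 0 owns every other chunk across the whole file,
    // so the same selective query is split between the partitions.
    let by_rr = chunks_for_round_robin(&chunks, 0, 2);

    println!("byte-range partition 0: {} chunks", by_range.len());
    println!("round-robin partition 0: {} chunks", by_rr.len());
}
```

With ordered input, the byte-range split hands all of a selective query's matching chunks to one partition, while the round-robin split interleaves them across partitions, which is the imbalance described above.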
