mertak-synnada commented on code in PR #14224:
URL: https://github.com/apache/datafusion/pull/14224#discussion_r1923766886
##########
datafusion/core/src/datasource/data_source.rs:
##########
@@ -0,0 +1,264 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! DataSource and FileSource trait implementations
+
+use std::any::Any;
+use std::fmt;
+use std::fmt::Formatter;
+use std::sync::Arc;
+
+use crate::datasource::listing::PartitionedFile;
+use crate::datasource::physical_plan::{
+    FileGroupPartitioner, FileOpener, FileScanConfig, FileStream,
+};
+
+use arrow_schema::SchemaRef;
+use datafusion_common::Statistics;
+use datafusion_execution::{SendableRecordBatchStream, TaskContext};
+use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
+use datafusion_physical_expr_common::sort_expr::LexOrdering;
+use datafusion_physical_plan::metrics::ExecutionPlanMetricsSet;
+use datafusion_physical_plan::source::{DataSource, DataSourceExec};
+use datafusion_physical_plan::{DisplayAs, DisplayFormatType};
+
+use object_store::ObjectStore;
+
+/// Common behaviors that every `FileSourceConfig` needs to implement.
+pub trait FileSource: Send + Sync {
+    /// Creates a `dyn FileOpener` based on given parameters
+    fn create_file_opener(
+        &self,
+        object_store: datafusion_common::Result<Arc<dyn ObjectStore>>,
+        base_config: &FileScanConfig,
+        partition: usize,
+    ) -> datafusion_common::Result<Arc<dyn FileOpener>>;
+    /// Any
+    fn as_any(&self) -> &dyn Any;
+    /// Initialize new type with batch size configuration
+    fn with_batch_size(&self, batch_size: usize) -> Arc<dyn FileSource>;
+    /// Initialize new instance with a new schema
+    fn with_schema(&self, schema: SchemaRef) -> Arc<dyn FileSource>;
+    /// Initialize new instance with projection information
+    fn with_projection(&self, config: &FileScanConfig) -> Arc<dyn FileSource>;
+    /// Initialize new instance with projected statistics
+    fn with_statistics(&self, statistics: Statistics) -> Arc<dyn FileSource>;
+    /// Return execution plan metrics
+    fn metrics(&self) -> &ExecutionPlanMetricsSet;
+    /// Return projected statistics
+    fn statistics(&self) -> datafusion_common::Result<Statistics>;
+    /// Returns the file type such as Arrow, Avro, Parquet, ...
+    fn file_type(&self) -> FileType;
+    /// Format FileType specific information
+    fn fmt_extra(&self, _t: DisplayFormatType, _f: &mut Formatter) -> fmt::Result {
+        Ok(())
+    }
+}
+
+/// Determines file types
+pub enum FileType {
+    /// Arrow File
+    Arrow,
+    /// Avro File
+    Avro,
+    /// CSV File
+    Csv,
+    /// JSON File
+    Json,
+    /// Parquet File
+    Parquet,
+}
+
+impl FileType {
+    fn to_str(&self) -> &str {
+        match self {
+            FileType::Arrow => "arrow",
+            FileType::Avro => "avro",
+            FileType::Csv => "csv",
+            FileType::Json => "json",
+            FileType::Parquet => "parquet",
+        }
+    }
+
+    /// Is the file type avro?
+    pub fn is_avro(&self) -> bool {
+        matches!(self, FileType::Avro)
+    }
+
+    /// Is the file type csv?
+    pub fn is_csv(&self) -> bool {
+        matches!(self, FileType::Csv)
+    }
+
+    /// Is the file type parquet?
+    pub fn is_parquet(&self) -> bool {
+        matches!(self, FileType::Parquet)
+    }
+}
+
+/// Holds generic file configuration, and common behaviors for file sources.
+/// Can be initialized with a `FileScanConfig`
+/// and a `dyn FileSource` type such as `CsvConfig`, `ParquetConfig`, `AvroConfig`, etc.
+#[derive(Clone)]
+pub struct FileSourceConfig {
+    source: Arc<dyn FileSource>,
+    base_config: FileScanConfig,
+}
+
+impl FileSourceConfig {
+    // TODO: This function should be moved into DataSourceExec once FileScanConfig and FileSourceConfig moved out of datafusion/core

Review Comment:
   This function was created as syntactic sugar, to avoid repeating these lines:
   ```
   let source = Arc::new(FileSourceConfig::new(base_config, file_source));
   let exec = Arc::new(DataSourceExec::new(source));
   ```
   However, it feels like this belongs in `DataSourceExec`; since these types currently live in `datafusion/core`, that is not possible yet. Once they are moved into `datafusion/physical-plan`, this code can be changed to:
   ```
   impl DataSourceExec {
       pub fn from_file_config(
           base_config: FileScanConfig,
           file_source: Arc<dyn FileSource>,
       ) -> Arc<DataSourceExec> {
           let source = Arc::new(FileSourceConfig::new(base_config, file_source));
           Arc::new(Self::new(source))
       }
   }
   ```
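To make the intended shape of the new `FileSource` trait concrete, here is a minimal, hypothetical implementor. This is a sketch only, not code from this PR: the `MockSource` name, its fields, and the module paths in the imports are assumptions based on the diff quoted above.

```rust
// Sketch of a format-specific FileSource (hypothetical; not part of this PR).
// Imports assume the module layout introduced by this PR: FileSource/FileType in
// the new data_source module, FileOpener/FileScanConfig where they live today.
use std::any::Any;
use std::sync::Arc;

use arrow_schema::SchemaRef;
use datafusion::datasource::data_source::{FileSource, FileType};
use datafusion::datasource::physical_plan::{FileOpener, FileScanConfig};
use datafusion_common::{Result, Statistics};
use datafusion_physical_plan::metrics::ExecutionPlanMetricsSet;
use object_store::ObjectStore;

#[derive(Clone)]
struct MockSource {
    metrics: ExecutionPlanMetricsSet,
    projected_statistics: Option<Statistics>,
}

impl FileSource for MockSource {
    fn create_file_opener(
        &self,
        _object_store: Result<Arc<dyn ObjectStore>>,
        _base_config: &FileScanConfig,
        _partition: usize,
    ) -> Result<Arc<dyn FileOpener>> {
        // A real format would build and return its Arc<dyn FileOpener> here.
        unimplemented!("format-specific opener goes here")
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    // The with_* methods are builders: each returns an updated copy of the source.
    fn with_batch_size(&self, _batch_size: usize) -> Arc<dyn FileSource> {
        Arc::new(self.clone())
    }

    fn with_schema(&self, _schema: SchemaRef) -> Arc<dyn FileSource> {
        Arc::new(self.clone())
    }

    fn with_projection(&self, _config: &FileScanConfig) -> Arc<dyn FileSource> {
        Arc::new(self.clone())
    }

    fn with_statistics(&self, statistics: Statistics) -> Arc<dyn FileSource> {
        let mut source = self.clone();
        source.projected_statistics = Some(statistics);
        Arc::new(source)
    }

    fn metrics(&self) -> &ExecutionPlanMetricsSet {
        &self.metrics
    }

    fn statistics(&self) -> Result<Statistics> {
        Ok(self
            .projected_statistics
            .clone()
            .expect("projected statistics must be set before use"))
    }

    fn file_type(&self) -> FileType {
        FileType::Csv
    }
}
```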
##########
datafusion/physical-plan/src/source.rs:
##########
@@ -0,0 +1,201 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::any::Any;
+use std::fmt;
+use std::fmt::{Debug, Formatter};
+use std::sync::Arc;
+
+use crate::execution_plan::{Boundedness, EmissionType};
+use crate::metrics::{ExecutionPlanMetricsSet, MetricsSet};
+use crate::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties};
+
+use datafusion_common::config::ConfigOptions;
+use datafusion_common::{Constraints, Statistics};
+use datafusion_execution::{SendableRecordBatchStream, TaskContext};
+use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
+use datafusion_physical_expr_common::sort_expr::LexOrdering;
+
+/// Common behaviors in Data Sources for both from Files and Memory.
+/// See `DataSourceExec` for physical plan implementation
+pub trait DataSource: Send + Sync {

Review Comment:
   This trait defines the common data source behaviors and is similar to `DataSink`. The main function is `open`, which creates either a `FileStream` or a `MemoryStream`.
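As an illustration of that, a toy `DataSource` that serves a single in-memory batch might look roughly like the sketch below. `SingleBatchSource` is hypothetical and not part of this PR; the trait methods follow the signatures quoted above, and the `source` module path is the one this PR introduces.

```rust
// Hypothetical DataSource implementor that wraps one RecordBatch in a MemoryStream.
use std::any::Any;
use std::fmt::{self, Formatter};
use std::sync::Arc;

use arrow::record_batch::RecordBatch;
use datafusion_common::{Result, Statistics};
use datafusion_execution::{SendableRecordBatchStream, TaskContext};
use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
use datafusion_physical_plan::memory::MemoryStream;
use datafusion_physical_plan::source::DataSource; // path as added by this PR
use datafusion_physical_plan::DisplayFormatType;

struct SingleBatchSource {
    batch: RecordBatch,
}

impl DataSource for SingleBatchSource {
    fn open(
        &self,
        _partition: usize,
        _context: Arc<TaskContext>,
    ) -> Result<SendableRecordBatchStream> {
        // Everything already lives in memory, so wrap the batch in a MemoryStream.
        Ok(Box::pin(MemoryStream::try_new(
            vec![self.batch.clone()],
            self.batch.schema(),
            None,
        )?))
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn fmt_as(&self, _t: DisplayFormatType, f: &mut Formatter) -> fmt::Result {
        write!(f, "SingleBatchSource")
    }

    fn output_partitioning(&self) -> Partitioning {
        // A single batch is served as a single partition.
        Partitioning::UnknownPartitioning(1)
    }

    fn eq_properties(&self) -> EquivalenceProperties {
        EquivalenceProperties::new(self.batch.schema())
    }

    fn statistics(&self) -> Result<Statistics> {
        Ok(Statistics::new_unknown(&self.batch.schema()))
    }
    // repartitioned, with_fetch, fetch, and metrics fall back to the trait defaults.
}
```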
##########
datafusion/physical-plan/src/source.rs:
##########
@@ -0,0 +1,201 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::any::Any;
+use std::fmt;
+use std::fmt::{Debug, Formatter};
+use std::sync::Arc;
+
+use crate::execution_plan::{Boundedness, EmissionType};
+use crate::metrics::{ExecutionPlanMetricsSet, MetricsSet};
+use crate::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties};
+
+use datafusion_common::config::ConfigOptions;
+use datafusion_common::{Constraints, Statistics};
+use datafusion_execution::{SendableRecordBatchStream, TaskContext};
+use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
+use datafusion_physical_expr_common::sort_expr::LexOrdering;
+
+/// Common behaviors in Data Sources for both from Files and Memory.
+/// See `DataSourceExec` for physical plan implementation
+pub trait DataSource: Send + Sync {
+    fn open(
+        &self,
+        partition: usize,
+        context: Arc<TaskContext>,
+    ) -> datafusion_common::Result<SendableRecordBatchStream>;
+    fn as_any(&self) -> &dyn Any;
+    fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter) -> fmt::Result;
+    fn repartitioned(
+        &self,
+        _target_partitions: usize,
+        _repartition_file_min_size: usize,
+        _output_ordering: Option<LexOrdering>,
+    ) -> datafusion_common::Result<Option<Arc<dyn DataSource>>> {
+        Ok(None)
+    }
+
+    fn output_partitioning(&self) -> Partitioning;
+    fn eq_properties(&self) -> EquivalenceProperties;
+    fn statistics(&self) -> datafusion_common::Result<Statistics>;
+    fn with_fetch(&self, _limit: Option<usize>) -> Option<Arc<dyn DataSource>> {
+        None
+    }
+    fn fetch(&self) -> Option<usize> {
+        None
+    }
+    fn metrics(&self) -> ExecutionPlanMetricsSet {
+        ExecutionPlanMetricsSet::new()
+    }
+}
+
+impl Debug for dyn DataSource {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "DataSource: ")
+    }
+}
+
+/// Unified data source for file formats like JSON, CSV, AVRO, ARROW, PARQUET
+#[derive(Clone, Debug)]
+pub struct DataSourceExec {

Review Comment:
   `DataSourceExec` is responsible for calculating plan properties, managing the cache, and using `source` to get source-related information and create a stream. The repartitioning logic is split in two: `DataSourceExec` handles the `ExecutionPlan` side and delegates the rest to the source definition.
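A rough sketch of that split is shown below. It is illustrative only, not the implementation in this PR: the plan-level code asks the source whether it can repartition itself and, if so, rebuilds the exec node around the new source. The `DataSourceExec::new` signature and the optimizer config field are assumptions based on the snippets quoted in this thread; the real code would also thread the current output ordering through.

```rust
use std::sync::Arc;

use datafusion_common::config::ConfigOptions;
use datafusion_common::Result;
use datafusion_physical_plan::source::{DataSource, DataSourceExec}; // paths per this PR
use datafusion_physical_plan::ExecutionPlan;

// Illustrative delegation from the ExecutionPlan level down to DataSource::repartitioned.
fn repartition_source(
    source: &Arc<dyn DataSource>,
    target_partitions: usize,
    config: &ConfigOptions,
) -> Result<Option<Arc<dyn ExecutionPlan>>> {
    // Ask the source whether it can split itself into more partitions.
    // (The output ordering argument is elided in this sketch.)
    let repartitioned = source.repartitioned(
        target_partitions,
        config.optimizer.repartition_file_min_size,
        None,
    )?;
    // If it can, rebuild the exec node (and its cached plan properties) around the
    // new source; otherwise keep the existing plan unchanged.
    Ok(repartitioned
        .map(|new_source| Arc::new(DataSourceExec::new(new_source)) as Arc<dyn ExecutionPlan>))
}
```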
##########
docs/source/library-user-guide/custom-table-providers.md:
##########
@@ -89,7 +89,7 @@ This:
 2. Constructs the individual output arrays (columns)
 3. Returns a `MemoryStream` of a single `RecordBatch` with the arrays
 
-I.e. returns the "physical" data. For other examples, refer to the [`CsvExec`][csv] and [`ParquetExec`][parquet] for more complex implementations.
+I.e. returns the "physical" data. For other examples, refer to the [`CsvConfig`][csv] and [`ParquetConfig`][parquet] for more complex implementations.

Review Comment:
   Note: There are some links in this documentation that need to be updated after this PR is merged.
   ```
   [ex]: https://github.com/apache/datafusion/blob/a5e86fae3baadbd99f8fd0df83f45fde22f7b0c6/datafusion-examples/examples/custom_datasource.rs#L214C1-L276
   [csv]: https://github.com/apache/datafusion/blob/a5e86fae3baadbd99f8fd0df83f45fde22f7b0c6/datafusion/core/src/datasource/physical_plan/csv.rs#L57-L70
   [parquet]: https://github.com/apache/datafusion/blob/a5e86fae3baadbd99f8fd0df83f45fde22f7b0c6/datafusion/core/src/datasource/physical_plan/parquet.rs#L77-L104
   ```

##########
datafusion/physical-plan/src/memory.rs:
##########
@@ -168,38 +137,41 @@ impl ExecutionPlan for MemoryExec {
     }
 }
 
-impl MemoryExec {
-    /// Create a new execution plan for reading in-memory record batches
+impl MemorySourceConfig {
+    /// Create a new `MemorySourceConfig` for reading in-memory record batches
     /// The provided `schema` should not have the projection applied.
     pub fn try_new(
         partitions: &[Vec<RecordBatch>],
         schema: SchemaRef,
         projection: Option<Vec<usize>>,
     ) -> Result<Self> {
         let projected_schema = project_schema(&schema, projection.as_ref())?;
-        let constraints = Constraints::empty();
-        let cache = Self::compute_properties(
-            Arc::clone(&projected_schema),
-            &[],
-            constraints,
-            partitions,
-        );
         Ok(Self {
             partitions: partitions.to_vec(),
             schema,
             projected_schema,
             projection,
             sort_information: vec![],
-            cache,
             show_sizes: true,
         })
     }
 
+    /// Create a new `DataSourceExec` plan for reading in-memory record batches
     /// The provided `schema` should not have the projection applied.
+    pub fn try_new_exec(

Review Comment:
   Same as `FileSourceConfig::new_exec`.
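For illustration, a hypothetical call site for the new constructor might look like the sketch below, assuming `try_new_exec` takes the same arguments as `try_new` and returns an `Arc<DataSourceExec>` wrapped in a `Result` (both assumptions; the full signature is not quoted above). Module paths are those introduced by this PR.

```rust
use std::sync::Arc;

use arrow::array::Int32Array;
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;
use datafusion_common::Result;
use datafusion_physical_plan::memory::MemorySourceConfig; // path per this PR
use datafusion_physical_plan::source::DataSourceExec; // path per this PR

// Hypothetical: build a one-batch in-memory scan through the new API.
fn memory_scan() -> Result<Arc<DataSourceExec>> {
    let schema = Arc::new(Schema::new(vec![Field::new("a", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        Arc::clone(&schema),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )?;
    // Before this PR one would construct a MemoryExec directly; afterwards the
    // MemorySourceConfig hands back a DataSourceExec in one call.
    MemorySourceConfig::try_new_exec(&[vec![batch]], schema, None)
}
```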