danny0405 commented on code in PR #13060:
URL: https://github.com/apache/hudi/pull/13060#discussion_r2021975825
##########
hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/FileIndex.java:
##########
@@ -68,25 +69,30 @@ public class FileIndex implements Serializable {
   private final org.apache.hadoop.conf.Configuration hadoopConf;
   private final PartitionPruners.PartitionPruner partitionPruner; // for partition pruning
   private final ColumnStatsProbe colStatsProbe;                   // for probing column stats
-  private final int dataBucket;                                   // for bucket pruning
+  private final int dataBucketHashing;                            // for bucket pruning
   private List<String> partitionPaths;                            // cache of partition paths
   private final FileStatsIndex fileStatsIndex;                    // for data skipping
+  private final NumBucketsFunction numBucketsFunction;

   private FileIndex(
       StoragePath path,
       Configuration conf,
       RowType rowType,
       ColumnStatsProbe colStatsProbe,
       PartitionPruners.PartitionPruner partitionPruner,
-      int dataBucket) {
+      int dataBucketHashing,
+      NumBucketsFunction numBucketsFunction) {
     this.path = path;
     this.hadoopConf = HadoopConfigurations.getHadoopConf(conf);
     this.tableExists = StreamerUtil.tableExists(path.toString(), hadoopConf);
     this.metadataConfig = StreamerUtil.metadataConfig(conf);
     this.colStatsProbe = isDataSkippingFeasible(conf.get(FlinkOptions.READ_DATA_SKIPPING_ENABLED)) ? colStatsProbe : null;
     this.partitionPruner = partitionPruner;
-    this.dataBucket = dataBucket;

Review Comment:
   Can we still use the bucket id here?
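For context on the rename from `dataBucket` to `dataBucketHashing`, here is a minimal, self-contained sketch. It is not Hudi's actual `NumBucketsFunction` API; `NumBucketsFn`, `bucketId`, and the partition names below are hypothetical stand-ins. It only illustrates why a raw key hash plus a per-partition bucket count might be kept instead of a single pre-computed bucket id when the bucket count is allowed to differ across partitions.

```java
// Minimal sketch, assuming a per-partition bucket count; not Hudi's real API.
public class BucketIdSketch {

  /** Hypothetical per-partition bucket-count lookup. */
  interface NumBucketsFn {
    int numBuckets(String partitionPath);
  }

  /** Derives the bucket id from the key hash and the partition's bucket count. */
  static int bucketId(int keyHash, String partitionPath, NumBucketsFn fn) {
    return Math.floorMod(keyHash, fn.numBuckets(partitionPath));
  }

  public static void main(String[] args) {
    // Hypothetical setup: newer partitions use 8 buckets, older ones keep 4.
    NumBucketsFn fn = partition -> partition.startsWith("2024") ? 8 : 4;

    int keyHash = Math.abs("uuid-123".hashCode());

    // The same key hash resolves to different bucket ids depending on the
    // partition's bucket count, so a single fixed bucket id no longer
    // identifies the target file group across all partitions.
    System.out.println(bucketId(keyHash, "2024-01-01", fn)); // hash % 8
    System.out.println(bucketId(keyHash, "2023-12-31", fn)); // hash % 4
  }
}
```

If that matches the PR's intent, storing the hash and resolving the id per partition through `NumBucketsFunction` would be the reason a plain bucket id can no longer be kept up front; with a fixed bucket count, the old bucket id would still be equivalent.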