danny0405 commented on code in PR #13060:
URL: https://github.com/apache/hudi/pull/13060#discussion_r2021978502


##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileIndex.scala:
##########
@@ -103,6 +104,15 @@ case class HoodieFileIndex(spark: SparkSession,
     endCompletionTime = options.get(DataSourceReadOptions.END_COMMIT.key))
     with FileIndex {
 
   @transient protected var hasPushedDownPartitionPredicates: Boolean = false
+  private val isPartitionSimpleBucketIndex = PartitionBucketIndexUtils.isPartitionSimpleBucketIndex(spark.sparkContext.hadoopConfiguration,
+    metaClient.getBasePath.toString)
+
+  @transient private lazy val bucketIndexSupport = if (isPartitionSimpleBucketIndex) {
+    val specifiedQueryInstant = options.get(DataSourceReadOptions.TIME_TRAVEL_AS_OF_INSTANT.key).map(HoodieSqlCommonUtils.formatQueryInstant)
+    new PartitionBucketIndexSupport(spark, metadataConfig, metaClient, specifiedQueryInstant)

Review Comment:
   Always query from the latest hash config because there is no SI for readers/writers.
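
   One way to read this suggestion is the hedged sketch below (an illustration, not the PR's actual change): build PartitionBucketIndexSupport without the time-travel instant so that the bucket hash config is always resolved from the latest one. Only the constructor shape is taken from the diff above; the Option.empty argument, its "use the latest hash config" behavior, and the else branch are assumptions.

   @transient private lazy val bucketIndexSupport = if (isPartitionSimpleBucketIndex) {
     // Deliberately ignore TIME_TRAVEL_AS_OF_INSTANT here: without SI between
     // readers and writers, a hash config resolved as of an older instant may
     // not match how files are currently bucketed, so always read the latest.
     new PartitionBucketIndexSupport(spark, metadataConfig, metaClient, Option.empty)
   } else {
     // Hypothetical else branch (not shown in the truncated diff above): fall
     // back to the plain bucket index support.
     new BucketIndexSupport(spark, metadataConfig, metaClient)
   }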


