lokeshj1703 commented on code in PR #13591:
URL: https://github.com/apache/hudi/pull/13591#discussion_r2231050650


##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestHoodieBackedMetadata.java:
##########
@@ -1466,6 +1466,7 @@ private void verifyMetadataMergedRecords(HoodieTableMetaClient metadataMetaClien
         .withRequestedSchema(schema)
         .withDataSchema(schema)
         .withProps(new TypedProperties())
+        .withEnableOptimizedLogBlockScan(writeConfig.enableOptimizedLogBlocksScan())

Review Comment:
   This needs to be the metadata config, since we are reading the MDT records here.
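   
   A minimal sketch of what that could look like, assuming a `HoodieMetadataConfig` named `metadataConfig` is already available in this test (the variable name is an assumption):
   
   ```java
   // Sketch only: since this reader is scanning MDT log records, drive the flag
   // from the metadata config rather than the data-table write config.
           .withRequestedSchema(schema)
           .withDataSchema(schema)
           .withProps(new TypedProperties())
           .withEnableOptimizedLogBlockScan(metadataConfig.isOptimizedLogBlocksScanEnabled())
   ```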



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -1846,7 +1846,7 @@ public List<HoodieColumnRangeMetadata<Comparable>> getSortedColumnStatsList(Stri
             String filePath = new StoragePath(storagePartitionPath, filename).toString();
             try {
               return getLogFileColumnRangeMetadata(filePath, partitionPath, metaClient, allColumnNameList, Option.of(readerSchema),
-                  metadataConfig.getMaxReaderBufferSize())
+                  metadataConfig.getMaxReaderBufferSize(), metadataConfig.isOptimizedLogBlocksScanEnabled())

Review Comment:
   This should be the write config.
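   
   A sketch of the suggestion, assuming the validator has (or can build) a write config at this point; `enableOptimizedLogBlocksScan()` is the write-config accessor this PR already uses elsewhere:
   
   ```java
   // Sketch: take the scan flag from the write config instead of the metadata config.
   // `writeConfig` is assumed to be in scope in HoodieMetadataTableValidator here.
   return getLogFileColumnRangeMetadata(filePath, partitionPath, metaClient,
       allColumnNameList, Option.of(readerSchema),
       metadataConfig.getMaxReaderBufferSize(),
       writeConfig.enableOptimizedLogBlocksScan());
   ```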



##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java:
##########
@@ -3110,6 +3133,7 @@ private static TypedProperties getFileGroupReaderPropertiesFromStorageConf(Stora
         storageConf.getEnum(SPILLABLE_DISK_MAP_TYPE.key(), SPILLABLE_DISK_MAP_TYPE.defaultValue()).toString());
     properties.setProperty(DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(),
         Boolean.toString(storageConf.getBoolean(DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(), DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue())));
+    properties.setProperty(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.key(), HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.defaultValue());

Review Comment:
   Should we fetch this from the storage config here, instead of always setting the default?
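   
   Something along these lines, mirroring how the surrounding lines read `DISK_MAP_BITCASK_COMPRESSION_ENABLED` from `storageConf` (this assumes `defaultValue()` here is the string form of the flag, as the current `+` line suggests):
   
   ```java
   // Sketch: fall back to the config default only when the storage config has no value.
   properties.setProperty(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.key(),
       Boolean.toString(storageConf.getBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.key(),
           Boolean.parseBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.defaultValue()))));
   ```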



##########
hudi-hadoop-common/src/test/java/org/apache/hudi/metadata/TestHoodieTableMetadataUtil.java:
##########
@@ -273,7 +281,8 @@ public void testGetLogFileColumnRangeMetadata() throws Exception {
             metaClient,
             columnsToIndex,
             Option.of(HoodieTestDataGenerator.AVRO_SCHEMA_WITH_METADATA_FIELDS),
-            HoodieMetadataConfig.MAX_READER_BUFFER_SIZE_PROP.defaultValue());
+            HoodieMetadataConfig.MAX_READER_BUFFER_SIZE_PROP.defaultValue(),
+            metadataConfig.isOptimizedLogBlocksScanEnabled());

Review Comment:
   We need to update this so it does not use the metadata config.
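   
   For example, the test could pass the flag explicitly rather than deriving it from a metadata config (a rough sketch; `enableOptimizedLogBlocksScan` would be a plain boolean, e.g. a parameterized test input):
   
   ```java
   // Sketch: pass an explicit boolean instead of metadataConfig.isOptimizedLogBlocksScanEnabled().
   HoodieTableMetadataUtil.getLogFileColumnRangeMetadata(
       filePath,
       partitionPath,
       metaClient,
       columnsToIndex,
       Option.of(HoodieTestDataGenerator.AVRO_SCHEMA_WITH_METADATA_FIELDS),
       HoodieMetadataConfig.MAX_READER_BUFFER_SIZE_PROP.defaultValue(),
       enableOptimizedLogBlocksScan); // hypothetical test parameter
   ```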



##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java:
##########
@@ -2563,7 +2583,8 @@ public static HoodieData<HoodieRecord> convertFilesToPartitionStatsRecords(Hoodi
                                                                              HoodieMetadataConfig metadataConfig,
                                                                              HoodieTableMetaClient dataTableMetaClient,
                                                                              Lazy<Option<Schema>> lazyWriterSchemaOpt,
-                                                                             Option<HoodieRecordType> recordTypeOpt) {
+                                                                             Option<HoodieRecordType> recordTypeOpt,
+                                                                             boolean enableOptimizeLogBlocksScan) {

Review Comment:
   Should we add a similar parameter for `readRecordKeysFromFileSlices`?
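   
   Roughly like the following (the surrounding parameters are elided and illustrative, not the actual signature):
   
   ```java
   // Sketch: thread the same flag through readRecordKeysFromFileSlices, mirroring
   // convertFilesToPartitionStatsRecords. Existing parameters omitted for brevity.
   public static HoodieData<HoodieRecord> readRecordKeysFromFileSlices(HoodieEngineContext engineContext,
                                                                       /* ... existing parameters ... */
                                                                       boolean enableOptimizeLogBlocksScan) {
   ```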



##########
hudi-spark-datasource/hudi-spark-common/src/test/java/org/apache/hudi/testutils/LogFileColStatsTestUtil.java:
##########
@@ -45,8 +46,13 @@ public static Option<Row> getLogFileColumnRangeMetadata(String filePath, HoodieT
                                                           int maxBufferSize) throws IOException {
     if (writerSchemaOpt.isPresent()) {
       String partitionPath = FSUtils.getRelativePartitionPath(datasetMetaClient.getBasePath(), new StoragePath(filePath).getParent());
+      // Create metadata config from table properties
+      HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
+          .fromProperties(datasetMetaClient.getTableConfig().getProps())
+          .build();
       List<HoodieColumnRangeMetadata<Comparable>> columnRangeMetadataList =
-          HoodieTableMetadataUtil.getLogFileColumnRangeMetadata(filePath, partitionPath, datasetMetaClient, columnsToIndex, writerSchemaOpt, maxBufferSize);
+          HoodieTableMetadataUtil.getLogFileColumnRangeMetadata(filePath, partitionPath, datasetMetaClient, columnsToIndex, writerSchemaOpt,
+                  maxBufferSize, metadataConfig.isOptimizedLogBlocksScanEnabled());

Review Comment:
   This should be the write config.
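   
   A sketch of the alternative, assuming the helper can construct a `HoodieWriteConfig` from the same table properties (the builder calls shown are illustrative, not a confirmed API path):
   
   ```java
   // Sketch: derive the flag from a write config instead of a HoodieMetadataConfig.
   HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
       .withPath(datasetMetaClient.getBasePath().toString())
       .withProperties(datasetMetaClient.getTableConfig().getProps())
       .build();
   List<HoodieColumnRangeMetadata<Comparable>> columnRangeMetadataList =
       HoodieTableMetadataUtil.getLogFileColumnRangeMetadata(filePath, partitionPath, datasetMetaClient,
           columnsToIndex, writerSchemaOpt, maxBufferSize, writeConfig.enableOptimizedLogBlocksScan());
   ```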



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
