lokeshj1703 commented on code in PR #11434:
URL: https://github.com/apache/hudi/pull/11434#discussion_r1636319799
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestRecordLevelIndexWithSQL.scala:
##########
@@ -188,4 +188,19 @@ class TestRecordLevelIndexWithSQL extends RecordLevelIndexTestBase {
assertEquals(2, spark.read.format("hudi").options(hudiOpts).load(dummyTablePath).filter("not_record_key_col in ('row1', 'abc')").count())
}
-}
+
+ @Test
+ def testPrunedStoragePaths(): Unit = {
+ var hudiOpts = commonOpts
+ doWriteAndValidateDataAndRecordIndex(hudiOpts,
+ operation = DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL,
+ saveMode = SaveMode.Overwrite,
+ validate = false)
+ val reloadedMetaClient = HoodieTableMetaClient.reload(metaClient)
+ val globbedPaths = basePath + "/2015/03/16," + basePath + "/2015/03/17," + basePath + "/2016/03/15"
+ val fileIndex = new HoodieFileIndex(sparkSession, reloadedMetaClient, Option.empty, Map("glob.paths" -> globbedPaths), includeLogFiles = true)
+ val partitionFilter: Expression = EqualTo(AttributeReference("partition", StringType)(), Literal("2016/03/15"))
+ val prunedPaths = fileIndex.getFileSlicesForPrunedPartitions(Seq(partitionFilter))
Review Comment:
Updated the test
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]