lokeshj1703 commented on code in PR #13526:
URL: https://github.com/apache/hudi/pull/13526#discussion_r2230929092
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestCOWDataSource.scala:
##########
@@ -142,6 +141,48 @@ class TestCOWDataSource extends HoodieSparkClientTestBase
with ScalaAssertionSup
spark.read.format("org.apache.hudi").options(readOpts).load(basePath).count()
}
+ @Test
+ def testMultipleOrderingFields() {
+ val (writeOpts, readOpts) = getWriterReaderOpts(HoodieRecordType.AVRO)
+
+ // Insert Operation
+ var records = recordsToStrings(dataGen.generateInserts("002",
100)).asScala.toList
+ var inputDF = spark.read.json(spark.sparkContext.parallelize(records, 2))
+
+ val commonOptsWithMultipleOrderingFields = writeOpts ++ Map(
+ "hoodie.insert.shuffle.parallelism" -> "4",
+ "hoodie.upsert.shuffle.parallelism" -> "4",
+ DataSourceWriteOptions.RECORDKEY_FIELD.key -> "_row_key",
+ DataSourceWriteOptions.PARTITIONPATH_FIELD.key -> "partition",
+ DataSourceWriteOptions.PRECOMBINE_FIELD.key() -> "timestamp,rider",
Review Comment:
Added a test case for it.
##########
hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/log/block/TestHoodieDeleteBlock.java:
##########
@@ -100,7 +100,7 @@ public static Stream<Arguments> orderingValueParams() {
{new String[] {"val1", "val2", "val3", null}},
{new Timestamp[] {new Timestamp(1690766971000L), new
Timestamp(1672536571000L)}},
// {new LocalDate[] {LocalDate.of(2023, 1, 1), LocalDate.of(1980,
7, 1)}} // HUDI-8854
- {new BigDecimal[] {new BigDecimal("12345678901234.2948"), new
BigDecimal("23456789012345.4856")}}
+ {new BigDecimal[] {new
BigDecimal("12345678901234.294800000000000"), new
BigDecimal("23456789012345.485600000000000")}}
Review Comment:
These were probably required due to Avro schema changes. They are not
required anymore, so I removed them.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]