lokeshj1703 commented on code in PR #13229:
URL: https://github.com/apache/hudi/pull/13229#discussion_r2085322027


##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnCopyOnWriteStorage.java:
##########
@@ -686,15 +687,26 @@ public void testDeletesForInsertsInSameBatch() throws Exception {
   }
 
   private Pair<JavaRDD<WriteStatus>, List<HoodieRecord>> insertBatchRecords(SparkRDDWriteClient client, String commitTime,
-                                                                         Integer recordNum, int expectStatusSize, int numSlices,
-                                                                         Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, String> writeFn) throws IOException {
+                                                                            Integer recordNum, int expectStatusSize, int numSlices,
+                                                                            Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, String> writeFn) throws IOException {
+    return insertBatchRecords(client, commitTime, recordNum, expectStatusSize, numSlices, writeFn, false);
+  }
+
+  private Pair<JavaRDD<WriteStatus>, List<HoodieRecord>> insertBatchRecords(SparkRDDWriteClient client, String commitTime,
+                                                                            Integer recordNum, int expectStatusSize, int numSlices,
+                                                                            Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, String> writeFn,
+                                                                            boolean skipCommit) throws IOException {
     client.startCommitWithTime(commitTime);
     List<HoodieRecord> inserts = dataGen.generateInserts(commitTime, recordNum);
     JavaRDD<HoodieRecord> insertRecordsRDD = jsc.parallelize(inserts, numSlices);
     JavaRDD<WriteStatus> statuses = writeFn.apply(client, insertRecordsRDD, commitTime);
-    assertNoWriteErrors(statuses.collect());

Review Comment:
   Addressed
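
   For readers following the hunk: the snippet is truncated at the removed assertion, so the tail of the new overload is not visible here. As a hedged sketch only, the remainder could look like the lines below, assuming the helper keeps an expectStatusSize check and commits unless skipCommit is set; the conditional commit and its arguments are assumptions for illustration, not lines quoted from this PR.

       // Hypothetical continuation of insertBatchRecords; not quoted from the PR.
       List<WriteStatus> statusList = statuses.collect();
       assertEquals(expectStatusSize, statusList.size()); // honors the expectStatusSize parameter
       if (!skipCommit) {
         // BaseHoodieWriteClient#commit(String, O) is a real API, but its use here
         // (and any write-status validation it performs) is an assumption about this PR.
         client.commit(commitTime, statuses);
       }
       return Pair.of(statuses, inserts);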



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestClientRollback.java:
##########
@@ -110,8 +110,8 @@ public void testSavepointAndRollback(Boolean testFailedRestore, Boolean failedRe
       List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 200);
       JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);
 
-      List<WriteStatus> statuses = client.upsert(writeRecords, newCommitTime).collect();
-      assertNoWriteErrors(statuses);

Review Comment:
   Addressed



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieReadClient.java:
##########
@@ -116,9 +117,8 @@ private void testReadFilterExist(HoodieWriteConfig config,
 
      JavaRDD<HoodieRecord> smallRecordsRDD = jsc.parallelize(records.subList(0, 75), PARALLELISM);
      // We create three base file, each having one record. (3 different partitions)
-      List<WriteStatus> statuses = writeFn.apply(writeClient, smallRecordsRDD, newCommitTime).collect();
-      // Verify there are no errors
-      assertNoWriteErrors(statuses);

Review Comment:
   Addressed



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestSavepoint.java:
##########
@@ -86,15 +89,17 @@ public void testSavepoint(boolean enableMetadataTable,
       client.startCommitWithTime(commitTime1);
       List<HoodieRecord> records1 = dataGen.generateInserts(commitTime1, 200);
       JavaRDD<HoodieRecord> writeRecords1 = jsc.parallelize(records1, 1);
-      List<WriteStatus> statuses1 = client.upsert(writeRecords1, commitTime1).collect();
-      assertNoWriteErrors(statuses1);

Review Comment:
   Addressed
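
   All four hunks above follow the same pattern: the per-test collect() plus assertNoWriteErrors(...) pair is removed, and the replacement lines are not quoted in this review. For illustration, a consolidation of that check usually takes the shape of a shared helper like the hypothetical one below (the name writeAndVerify is invented here, not taken from the PR):

       // Hypothetical shared helper; the PR's actual replacement is not shown above.
       private List<WriteStatus> writeAndVerify(JavaRDD<WriteStatus> statuses) {
         List<WriteStatus> collected = statuses.collect();
         assertNoWriteErrors(collected); // the check deleted from each call site, kept in one place
         return collected;
       }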



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
