skoppu22 commented on code in PR #109:
URL: https://github.com/apache/cassandra-analytics/pull/109#discussion_r2081781154


##########
cassandra-analytics-integration-tests/src/test/java/org/apache/cassandra/analytics/BulkWriteUdtTest.java:
##########
@@ -88,17 +141,298 @@ void testWriteWithNestedUdt()
         assertThat(result.hasNext()).isTrue();
         validateWritesWithDriverResultSet(df.collectAsList(),
                                           queryAllDataWithDriver(NESTED_TABLE_NAME),
-                                          BulkWriteUdtTest::defaultRowFormatter);
+                                          BulkWriteUdtTest::udtRowFormatter);
+    }
+
+    @Test
+    void testListOfUdts()
+    {
+        int numRowsInserted = populateListOfUdts();
+
+        // Create a spark frame with the data inserted during the setup
+        Dataset<Row> sourceData = bulkReaderDataFrame(LIST_OF_UDT_SOURCE_TABLE).load();
+        assertThat(sourceData.count()).isEqualTo(numRowsInserted);
+
+        // Insert the dataset containing a list of UDTs, where the UDT itself has collections in it
+        bulkWriterDataFrameWriter(sourceData, LIST_OF_UDT_DEST_TABLE).save();
+        validateWritesWithDriverResultSet(sourceData.collectAsList(),
+                queryAllDataWithDriver(LIST_OF_UDT_DEST_TABLE),
+                BulkWriteUdtTest::listOfUdtRowFormatter);
+    }
+
+    private int populateListOfUdts()
+    {
+        // table(id, list<udt(list<>, set<>, map<>)>)
+        // insert list of UDTs, and each UDT has a list, set and map
+        String insertIntoListOfUdts = "INSERT INTO %s (id, udtlist) VALUES (%d, [{f1:['value %d'], f2:{%d}, f3:{%d : 'value %d'}}])";
+
+        int i = 0;
+        for (; i < ROW_COUNT; i++)
+        {
+            cluster.schemaChangeIgnoringStoppedInstances(String.format(insertIntoListOfUdts, LIST_OF_UDT_SOURCE_TABLE, i, i, i, i, i));
+        }
+
+        // test null cases
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id) values (%d)",
+                LIST_OF_UDT_SOURCE_TABLE, i++));
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id, udtlist) values (%d, null)",
+                LIST_OF_UDT_SOURCE_TABLE, i++));
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id, udtlist) values (%d, [{f1:null, f2:null, f3:null}])",
+                LIST_OF_UDT_SOURCE_TABLE, i++));
+
+        return i;
+    }
+
+    @Test
+    void testSetOfUdts()
+    {
+        int numRowsInserted = populateSetOfUdts();
+        // Create a spark frame with the data inserted during the setup
+        Dataset<Row> sourceData = bulkReaderDataFrame(SET_OF_UDT_SOURCE_TABLE).load();
+        assertThat(sourceData.count()).isEqualTo(numRowsInserted);
+
+        // Insert the dataset containing a set of UDTs, where the UDT itself has collections in it
+        bulkWriterDataFrameWriter(sourceData, SET_OF_UDT_DEST_TABLE).save();
+        validateWritesWithDriverResultSet(sourceData.collectAsList(),
+                queryAllDataWithDriver(SET_OF_UDT_DEST_TABLE),
+                BulkWriteUdtTest::setOfUdtRowFormatter);
+    }
+
+    private int populateSetOfUdts()
+    {
+        // table(id, set<udt(list<>, set<>, map<>)>)
+        // insert set of UDTs, and UDT has a list, set and map inside it
+        String insertIntoSetOfUdts = "INSERT INTO %s (id, udtset) VALUES (%d, " +
+                "{{f1:['value %d'], f2:{%d}, f3:{%d : 'value %d'}}})";
+
+        int i = 0;
+        for (; i < ROW_COUNT; i++)
+        {
+            cluster.schemaChangeIgnoringStoppedInstances(String.format(insertIntoSetOfUdts, SET_OF_UDT_SOURCE_TABLE,
+                    i, i, i, i, i));
+        }
+
+        // test null cases
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id) values (%d)",
+                SET_OF_UDT_SOURCE_TABLE, i++));
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id, udtset) values (%d, null)",
+                SET_OF_UDT_SOURCE_TABLE, i++));
+        cluster.schemaChangeIgnoringStoppedInstances(String.format("insert into %s (id, udtset) values (%d, " +
+                        "{{f1:null, f2:null, f3:null}})",
+                SET_OF_UDT_SOURCE_TABLE, i++));
+

Review Comment:
   - schemaChangeIgnoringStoppedInstances also uses executeWithResult internally, but it additionally goes through a Monitor.
   - Changed to use execute for insert and select queries.
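   
   A minimal sketch of the change suggested above, assuming the in-jvm dtest coordinator API (`org.apache.cassandra.distributed.api`); the helper name, node index, and consistency level are illustrative, not taken from the PR:
   
   ```java
   import org.apache.cassandra.distributed.api.ConsistencyLevel;
   import org.apache.cassandra.distributed.api.ICluster;
   import org.apache.cassandra.distributed.api.IInstance;
   
   // Hypothetical helper: routes DML through a coordinator's execute(...)
   // rather than schemaChangeIgnoringStoppedInstances(...), which is meant
   // for DDL and additionally coordinates through a Monitor.
   final class UdtInsertHelper
   {
       private UdtInsertHelper()
       {
       }
   
       static void insertListOfUdtRow(ICluster<? extends IInstance> cluster, String table, int i)
       {
           String insert = "INSERT INTO %s (id, udtlist) VALUES (%d, [{f1:['value %d'], f2:{%d}, f3:{%d : 'value %d'}}])";
           // Plain DML execution at a chosen consistency level; no schema-change machinery is involved
           cluster.coordinator(1).execute(String.format(insert, table, i, i, i, i, i), ConsistencyLevel.ALL);
       }
   }
   ```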



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

