HeartSaVioR commented on code in PR #49277:
URL: https://github.com/apache/spark/pull/49277#discussion_r1917940301


##########
python/pyspark/sql/tests/pandas/test_pandas_transform_with_state.py:
##########
@@ -1294,6 +1307,208 @@ def test_transform_with_state_with_timers_single_partition(self):
             self.test_transform_with_state_in_pandas_proc_timer()
             self.test_transform_with_state_restart_with_multiple_rows_init_state()
 
+    def _run_evolution_test(
+        self, processor, checkpoint_dir, check_results, df, check_exception=None
+    ):
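+        """Run one schema evolution scenario: start the stream with the given
+        processor, validate each batch via check_results, and verify the
+        expected failure when check_exception is provided."""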
+        # Schema definitions
+        basic_state_schema = StructType(
+            [StructField("id", IntegerType(), True), StructField("name", StringType(), True)]
+        )
+
+        evolved_state_schema = StructType(
+            [
+                StructField("id", IntegerType(), True),
+                StructField("name", StringType(), True),
+                StructField("count", IntegerType(), True),
+                StructField("active", BooleanType(), True),
+                StructField("score", FloatType(), True),
+            ]
+        )
+
+        reordered_state_schema = StructType(
+            [
+                StructField("name", StringType(), True),
+                StructField("id", IntegerType(), True),
+                StructField("score", FloatType(), True),
+                StructField("count", IntegerType(), True),
+                StructField("active", BooleanType(), True),
+            ]
+        )
+
+        upcast_state_schema = StructType(
+            [
+                StructField("id", LongType(), True),  # Upcast from Int to Long
+                StructField("name", StringType(), True),
+            ]
+        )
+        schema_map = {
+            BasicProcessor: basic_state_schema,
+            AddFieldsProcessor: evolved_state_schema,
+            RemoveFieldsProcessor: basic_state_schema,
+            ReorderedFieldsProcessor: reordered_state_schema,
+            UpcastProcessor: upcast_state_schema,
+        }
+
+        output_schema = StructType(
+            [
+                StructField("id", StringType(), True),
+                StructField("value", schema_map[processor.__class__], True),
+            ]
+        )
+
+        # Stop any active streams first
+        for q in self.spark.streams.active:
+            q.stop()
+
+        try:
+            q = (
+                df.groupBy("id")
+                .transformWithStateInPandas(
+                    statefulProcessor=processor,
+                    outputStructType=output_schema,
+                    outputMode="Update",
+                    timeMode="None",
+                )
+                .writeStream.queryName("evolution_test")
+                .option("checkpointLocation", checkpoint_dir)
+                .foreachBatch(check_results)
+                .outputMode("update")
+                .start()
+            )
+
+            self.assertEqual(q.name, "evolution_test")
+            self.assertTrue(q.isActive)
+            q.processAllAvailable()
+            q.awaitTermination(10)
+
+            if q.exception() is None:
+                assert check_exception is None
+
+        except Exception as e:
+            # If we are expecting an exception, verify it's the right one
+            if check_exception is None:
+                raise  # Re-raise if we weren't expecting an exception
+            self.assertTrue(check_exception(e))
+
+    def test_schema_evolution_scenarios(self):
+        """Test various schema evolution scenarios"""
+        with self.sql_conf({"spark.sql.streaming.stateStore.encodingFormat": "avro"}):
+            with tempfile.TemporaryDirectory() as checkpoint_dir:
+                # Test 1: Basic state
+
+                input_path = tempfile.mkdtemp()
+                self._prepare_test_resource1(input_path)
+
+                df = self._build_test_df(input_path)
+
+                def check_basic_state(batch_df, batch_id):
+                    result = batch_df.collect()[0]
+                    assert result.value["id"] == 0  # First ID from test data
+                    assert result.value["name"] == "name-0"
+
+                self._run_evolution_test(BasicProcessor(), checkpoint_dir, check_basic_state, df)
+
+                self._prepare_test_resource2(input_path)
+
+                # Test 2: Add fields
+                def check_add_fields(batch_df, batch_id):
+                    results = batch_df.collect()
+                    # Check default values for existing key
+                    assert results[0].value["count"] is None
+                    assert results[0].value["active"] is None
+                    assert results[0].value["score"] is None
+
+                self._run_evolution_test(AddFieldsProcessor(), checkpoint_dir, check_add_fields, df)
+                self._prepare_test_resource3(input_path)
+
+                # Test 3: Remove fields
+                def check_remove_fields(batch_df, batch_id):
+                    result = batch_df.collect()[0]
+                    assert result.value["id"] == 0  # First ID from test data
+                    assert result.value["name"] == "name-0"
+                    assert len(result.value) == 2
+
+                self._run_evolution_test(
                    RemoveFieldsProcessor(), checkpoint_dir, check_remove_fields, df
+                )
+                self._prepare_test_resource4(input_path)
+
+                # Test 4: Reorder fields

Review Comment:
   I prefer to have a concise test verifying exactly the thing under test, but it's an individual preference and this seems to be OK.
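
For illustration only, a tighter check in the spirit of this comment could pin down just the columns added by the evolution step. This sketch is not part of the patch; the batch layout and None defaults are taken from check_add_fields in the diff above, and the helper name is hypothetical:

    # Sketch: assert only the evolved fields, assuming the batch schema above.
    def check_add_fields_concise(batch_df, batch_id):
        row = batch_df.collect()[0]
        # Fields added by schema evolution default to None for an existing key.
        assert row.value["count"] is None
        assert row.value["active"] is None
        assert row.value["score"] is None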



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

