zhengruifeng commented on code in PR #50748:
URL: https://github.com/apache/spark/pull/50748#discussion_r2067564698


##########
python/pyspark/sql/tests/connect/test_connect_collection.py:
##########
@@ -16,18 +16,19 @@
 #
 
 import unittest
-from pyspark.testing.connectutils import should_test_connect
-from pyspark.sql.tests.connect.test_connect_basic import SparkConnectSQLTestCase
+from pyspark.testing.connectutils import should_test_connect, ReusedMixedTestCase
+from pyspark.testing.pandasutils import PandasOnSparkTestUtils
 
 if should_test_connect:
     from pyspark.sql import functions as SF
     from pyspark.sql.connect import functions as CF
 
 
-class SparkConnectCollectionTests(SparkConnectSQLTestCase):
+class SparkConnectCollectionTests(ReusedMixedTestCase, PandasOnSparkTestUtils):
     def test_collect(self):
-        cdf = self.connect.read.table(self.tbl_name)
-        sdf = self.spark.read.table(self.tbl_name)
+        query = "SELECT id, CAST(id AS STRING) AS name FROM RANGE(100)"
+        cdf = self.connect.sql(query)
+        sdf = self.spark.sql(query)

Review Comment:
   yes, we already have a dedicated test for read/write: `test_connect_readwriter`
   
   there is no need to set up tables in tests like this one
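   
   For illustration, a minimal sketch of the table-free pattern the diff adopts (assuming `ReusedMixedTestCase` exposes both a classic `self.spark` and a Connect `self.connect` session, and that `PandasOnSparkTestUtils` provides an `assert_eq` helper, as the new imports suggest):
   
   ```python
   # Sketch: compare classic Spark and Spark Connect results without
   # creating any tables -- the data comes from an inline SQL query.
   query = "SELECT id, CAST(id AS STRING) AS name FROM RANGE(100)"
   
   cdf = self.connect.sql(query)  # Spark Connect DataFrame
   sdf = self.spark.sql(query)    # classic Spark DataFrame
   
   # assert_eq (assumed to come from PandasOnSparkTestUtils) compares the rows
   self.assert_eq(cdf.collect(), sdf.collect())
   ```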



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

