parthchandra commented on code in PR #3722:
URL: https://github.com/apache/datafusion-comet/pull/3722#discussion_r2966823685
##########
dev/diffs/3.4.3.diff:
##########
@@ -524,31 +534,41 @@ index 2796b1cf154..52438178a0e 100644
import org.apache.spark.sql.execution.{FileSourceScanLike, SimpleMode}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.datasources.FilePartition
-@@ -815,6 +816,7 @@ class FileBasedDataSourceSuite extends QueryTest
+@@ -499,7 +500,8 @@ class FileBasedDataSourceSuite extends QueryTest
+ }
+
+ Seq("parquet", "orc").foreach { format =>
+- test(s"Spark native readers should respect spark.sql.caseSensitive -
${format}") {
++ test(s"Spark native readers should respect spark.sql.caseSensitive -
${format}",
++
IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/3311"))
{
+ withTempDir { dir =>
+ val tableName = s"spark_25132_${format}_native"
+ val tableDir = dir.getCanonicalPath + s"/$tableName"
+@@ -815,6 +817,7 @@ class FileBasedDataSourceSuite extends QueryTest
assert(bJoinExec.isEmpty)
val smJoinExec = collect(joinedDF.queryExecution.executedPlan) {
case smJoin: SortMergeJoinExec => smJoin
+ case smJoin: CometSortMergeJoinExec => smJoin
Review Comment:
   What's the change here (and in the two changed lines below)?
##########
dev/diffs/3.4.3.diff:
##########
@@ -2314,26 +2447,37 @@ index 26e61c6b58d..cb09d7e116a 100644
spark.range(10).selectExpr("id", "id % 3 as p")
.write.partitionBy("p").saveAsTable("testDataForScan")
diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala
b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala
-index 0ab8691801d..d9125f658ad 100644
+index 0ab8691801d..f1c4b3d92b1 100644
---
a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala
+++
b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala
-@@ -18,6 +18,7 @@
+@@ -18,6 +18,8 @@
package org.apache.spark.sql.execution.python
import org.apache.spark.sql.catalyst.plans.logical.{ArrowEvalPython,
BatchEvalPython, Limit, LocalLimit}
++import org.apache.spark.sql.IgnoreCometNativeDataFusion
+import org.apache.spark.sql.comet._
import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan,
SparkPlanTest}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan
-@@ -108,6 +109,7 @@ class ExtractPythonUDFsSuite extends SparkPlanTest with
SharedSparkSession {
+@@ -93,7 +95,8 @@ class ExtractPythonUDFsSuite extends SparkPlanTest with
SharedSparkSession {
+ assert(arrowEvalNodes.size == 2)
+ }
+
+- test("Python UDF should not break column pruning/filter pushdown -- Parquet
V1") {
++ test("Python UDF should not break column pruning/filter pushdown -- Parquet
V1",
++
IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/3311"))
{
+ withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
+ withTempPath { f =>
+ spark.range(10).select($"id".as("a"), $"id".as("b"))
+@@ -108,6 +111,7 @@ class ExtractPythonUDFsSuite extends SparkPlanTest with
SharedSparkSession {
val scanNodes = query.queryExecution.executedPlan.collect {
case scan: FileSourceScanExec => scan
+ case scan: CometScanExec => scan
Review Comment:
Is this not sufficient to enable the test?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]