hsiang-c commented on code in PR #1715:
URL: https://github.com/apache/datafusion-comet/pull/1715#discussion_r2078667555
##########
dev/diffs/iceberg/1.8.1.diff:
##########
@@ -0,0 +1,266 @@
+diff --git a/spark/v3.4/build.gradle b/spark/v3.4/build.gradle
+index 6eb26e8..c288e72 100644
+--- a/spark/v3.4/build.gradle
++++ b/spark/v3.4/build.gradle
+@@ -75,7 +75,7 @@ project(":iceberg-spark:iceberg-spark-${sparkMajorVersion}_${scalaVersion}") {
+ exclude group: 'org.roaringbitmap'
+ }
+
+- compileOnly "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.5.0"
++ compileOnly "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+
+ implementation libs.parquet.column
+ implementation libs.parquet.hadoop
+@@ -185,7 +185,7 @@ project(":iceberg-spark:iceberg-spark-extensions-${sparkMajorVersion}_${scalaVer
+ testImplementation libs.avro.avro
+ testImplementation libs.parquet.hadoop
+ testImplementation libs.junit.vintage.engine
+- testImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.5.0"
++ testImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+
+ // Required because we remove antlr plugin dependencies from the compile configuration, see note above
+ runtimeOnly libs.antlr.runtime
+@@ -260,6 +260,8 @@ project(":iceberg-spark:iceberg-spark-runtime-${sparkMajorVersion}_${scalaVersio
+ integrationImplementation project(path: ':iceberg-hive-metastore', configuration: 'testArtifacts')
+ integrationImplementation project(path: ":iceberg-spark:iceberg-spark-${sparkMajorVersion}_${scalaVersion}", configuration: 'testArtifacts')
+ integrationImplementation project(path: ":iceberg-spark:iceberg-spark-extensions-${sparkMajorVersion}_${scalaVersion}", configuration: 'testArtifacts')
++ integrationImplementation project(path: ':iceberg-parquet')
++ integrationImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+
+ // runtime dependencies for running Hive Catalog based integration test
+ integrationRuntimeOnly project(':iceberg-hive-metastore')
+@@ -297,8 +299,8 @@ project(":iceberg-spark:iceberg-spark-runtime-${sparkMajorVersion}_${scalaVersio
+ relocate 'org.apache.avro', 'org.apache.iceberg.shaded.org.apache.avro'
+ relocate 'avro.shaded', 'org.apache.iceberg.shaded.org.apache.avro.shaded'
+ relocate 'com.thoughtworks.paranamer', 'org.apache.iceberg.shaded.com.thoughtworks.paranamer'
+- relocate 'org.apache.parquet', 'org.apache.iceberg.shaded.org.apache.parquet'
+- relocate 'shaded.parquet', 'org.apache.iceberg.shaded.org.apache.parquet.shaded'
++// relocate 'org.apache.parquet', 'org.apache.iceberg.shaded.org.apache.parquet'
++// relocate 'shaded.parquet', 'org.apache.iceberg.shaded.org.apache.parquet.shaded'
+ relocate 'org.apache.orc', 'org.apache.iceberg.shaded.org.apache.orc'
+ relocate 'io.airlift', 'org.apache.iceberg.shaded.io.airlift'
+ relocate 'org.apache.hc.client5', 'org.apache.iceberg.shaded.org.apache.hc.client5'
+diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
+index 0ca1236..87daef4 100644
+--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
++++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
+@@ -29,7 +29,7 @@ public class SparkSQLProperties {
+
+ // Controls which Parquet reader implementation to use
+ public static final String PARQUET_READER_TYPE = "spark.sql.iceberg.parquet.reader-type";
+- public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.ICEBERG;
++ public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.COMET;
+
+ // Controls whether reading/writing timestamps without timezones is allowed
+ @Deprecated
+diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/CometColumnReader.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/CometColumnReader.java
+index 4794863..8d02f02 100644
+--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/CometColumnReader.java
++++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/vectorized/CometColumnReader.java
+@@ -20,11 +20,11 @@ package org.apache.iceberg.spark.data.vectorized;
+
+ import java.io.IOException;
+ import java.util.Map;
++import org.apache.comet.CometSchemaImporter;
+ import org.apache.comet.parquet.AbstractColumnReader;
+ import org.apache.comet.parquet.ColumnReader;
+ import org.apache.comet.parquet.TypeUtil;
+ import org.apache.comet.parquet.Utils;
+-import org.apache.comet.shaded.arrow.c.CometSchemaImporter;
+ import org.apache.comet.shaded.arrow.memory.RootAllocator;
+ import org.apache.iceberg.parquet.VectorizedReader;
+ import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatchQueryScan.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatchQueryScan.java
+index a361a7f..9021cd5 100644
+--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatchQueryScan.java
++++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkBatchQueryScan.java
+@@ -24,6 +24,7 @@ import java.util.Objects;
+ import java.util.Set;
+ import java.util.function.Supplier;
+ import java.util.stream.Collectors;
++import org.apache.comet.parquet.SupportsComet;
+ import org.apache.iceberg.DeleteFile;
+ import org.apache.iceberg.FileContent;
+ import org.apache.iceberg.FileScanTask;
+@@ -63,7 +64,7 @@ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
+ class SparkBatchQueryScan extends SparkPartitioningAwareScan<PartitionScanTask>
+- implements SupportsRuntimeV2Filtering {
++ implements SupportsRuntimeV2Filtering, SupportsComet {
+
+ private static final Logger LOG = LoggerFactory.getLogger(SparkBatchQueryScan.class);
+
+@@ -290,4 +291,9 @@ class SparkBatchQueryScan extends SparkPartitioningAwareScan<PartitionScanTask>
+ runtimeFilterExpressions,
+ caseSensitive());
+ }
++
++ @Override
++ public boolean isCometEnabled() {
++ return true;
++ }
+ }
+diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java
+index 47a0e87..531b7ce 100644
+--- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java
++++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWriterV2.java
+@@ -41,6 +41,7 @@ import org.apache.spark.sql.internal.SQLConf;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
++import org.junit.Ignore;
+ import org.junit.Test;
+
+ public class TestDataFrameWriterV2 extends SparkTestBaseWithCatalog {
+@@ -214,7 +215,7 @@ public class TestDataFrameWriterV2 extends SparkTestBaseWithCatalog {
+ Assert.assertEquals(4, fields.size());
+ }
+
+- @Test
++ @Ignore
+ public void testMergeSchemaIgnoreCastingLongToInt() throws Exception {
+ sql(
+ "ALTER TABLE %s SET TBLPROPERTIES ('%s'='true')",
+@@ -254,7 +255,7 @@ public class TestDataFrameWriterV2 extends SparkTestBaseWithCatalog {
+ assertThat(idField.type().typeId()).isEqualTo(Type.TypeID.LONG);
+ }
+
+- @Test
++ @Ignore
+ public void testMergeSchemaIgnoreCastingDoubleToFloat() throws Exception {
+ removeTables();
+ sql("CREATE TABLE %s (id double, data string) USING iceberg", tableName);
+diff --git a/spark/v3.5/build.gradle b/spark/v3.5/build.gradle
+index e2d2c7a..8b5bff8 100644
+--- a/spark/v3.5/build.gradle
++++ b/spark/v3.5/build.gradle
+@@ -75,7 +75,7 @@ project(":iceberg-spark:iceberg-spark-${sparkMajorVersion}_${scalaVersion}") {
+ exclude group: 'org.roaringbitmap'
+ }
+
+- compileOnly "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.5.0"
++ compileOnly "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+
+ implementation libs.parquet.column
+ implementation libs.parquet.hadoop
+@@ -182,8 +182,8 @@ project(":iceberg-spark:iceberg-spark-extensions-${sparkMajorVersion}_${scalaVer
+
+ testImplementation libs.avro.avro
+ testImplementation libs.parquet.hadoop
++ testImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+ testImplementation libs.awaitility
+- testImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.5.0"
+
+ // Required because we remove antlr plugin dependencies from the compile configuration, see note above
+ runtimeOnly libs.antlr.runtime
+@@ -263,6 +263,7 @@ project(":iceberg-spark:iceberg-spark-runtime-${sparkMajorVersion}_${scalaVersio
+ integrationImplementation project(path: ':iceberg-hive-metastore', configuration: 'testArtifacts')
+ integrationImplementation project(path: ":iceberg-spark:iceberg-spark-${sparkMajorVersion}_${scalaVersion}", configuration: 'testArtifacts')
+ integrationImplementation project(path: ":iceberg-spark:iceberg-spark-extensions-${sparkMajorVersion}_${scalaVersion}", configuration: 'testArtifacts')
++ integrationImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:0.9.0-SNAPSHOT"
+
+ // runtime dependencies for running Hive Catalog based integration test
+ integrationRuntimeOnly project(':iceberg-hive-metastore')
+@@ -300,8 +301,8 @@ project(":iceberg-spark:iceberg-spark-runtime-${sparkMajorVersion}_${scalaVersio
+ relocate 'org.apache.avro', 'org.apache.iceberg.shaded.org.apache.avro'
+ relocate 'avro.shaded', 'org.apache.iceberg.shaded.org.apache.avro.shaded'
+ relocate 'com.thoughtworks.paranamer', 'org.apache.iceberg.shaded.com.thoughtworks.paranamer'
+- relocate 'org.apache.parquet', 'org.apache.iceberg.shaded.org.apache.parquet'
+- relocate 'shaded.parquet', 'org.apache.iceberg.shaded.org.apache.parquet.shaded'
++// relocate 'org.apache.parquet', 'org.apache.iceberg.shaded.org.apache.parquet'
++// relocate 'shaded.parquet', 'org.apache.iceberg.shaded.org.apache.parquet.shaded'
+ relocate 'org.apache.orc', 'org.apache.iceberg.shaded.org.apache.orc'
+ relocate 'io.airlift', 'org.apache.iceberg.shaded.io.airlift'
+ relocate 'org.apache.hc.client5', 'org.apache.iceberg.shaded.org.apache.hc.client5'
+diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
+index d6c16bb..123a300 100644
+--- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
++++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkSQLProperties.java
+@@ -29,7 +29,7 @@ public class SparkSQLProperties {
+
+ // Controls which Parquet reader implementation to use
+ public static final String PARQUET_READER_TYPE = "spark.sql.iceberg.parquet.reader-type";
+- public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.ICEBERG;
++ public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.COMET;

Review Comment:
   @huaxingao I changed the default to `COMET`.
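   For anyone picking up this patch: since the default flips from `ICEBERG` to `COMET`, a job that still needs the Iceberg-native Parquet reader has to opt out per session through the `spark.sql.iceberg.parquet.reader-type` property changed in this hunk. A minimal sketch of that override (the session setup, app name, and table name are hypothetical; only the property key and the `ICEBERG`/`COMET` values come from the patch):

   ```java
   import org.apache.spark.sql.SparkSession;

   public class ReaderTypeOverride {
     public static void main(String[] args) {
       SparkSession spark =
           SparkSession.builder()
               .appName("reader-type-override") // hypothetical app name
               // With this patch the default is ParquetReaderType.COMET;
               // setting the session property restores the Iceberg-native reader.
               .config("spark.sql.iceberg.parquet.reader-type", "ICEBERG")
               .getOrCreate();

       spark.sql("SELECT * FROM my_catalog.db.tbl").show(); // hypothetical table
       spark.stop();
     }
   }
   ```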