alexeykudinkin commented on a change in pull request #4888:
URL: https://github.com/apache/hudi/pull/4888#discussion_r832627036



##########
File path: 
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileScanRDD.scala
##########
@@ -20,64 +20,15 @@ package org.apache.hudi
 
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.execution.QueryExecutionException
-import org.apache.spark.sql.execution.datasources.{FilePartition, PartitionedFile, SchemaColumnConvertNotSupportedException}
-import org.apache.spark.{Partition, TaskContext}
+import org.apache.spark.sql.execution.datasources.{FilePartition, FileScanRDD, PartitionedFile}
 
 case class HoodieBaseFileSplit(filePartition: FilePartition) extends HoodieFileSplit
 
-/**
- * TODO eval if we actually need it
- */
 class HoodieFileScanRDD(@transient private val sparkSession: SparkSession,
                         readFunction: PartitionedFile => Iterator[InternalRow],
                         @transient fileSplits: Seq[HoodieBaseFileSplit])
-  extends HoodieUnsafeRDD(sparkSession.sparkContext) {
-
-  override def compute(split: Partition, context: TaskContext): Iterator[InternalRow] = {

Review comment:
       Pretty much




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to