hvanhovell commented on code in PR #49111:
URL: https://github.com/apache/spark/pull/49111#discussion_r1941539628


##########
connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala:
##########
@@ -471,16 +481,120 @@ private class KeyValueGroupedDatasetImpl[K, V, IK, IV](
     }
   }
 
+  private def aggUntypedWithValueMapFunc(columns: TypedColumn[_, _]*): Dataset[_] = {
+    val originalDs = sparkSession.newDataset(ivEncoder, plan)
+
+    // Apply the value transformation to get a DS with two columns, "iv" and "v".
+    // If either "iv" or "v" consists of a single primitive field, wrap it with a
+    // struct so that it is not flattened.
+    // We also detect whether the input "iv" is a single-field struct; if so, we
+    // rename the field to "key" to align with Spark's behaviour.
+    val (valueTransformedDf, ivFields, vFields) =
+      renameSingleFieldStruct(applyValueMapFunc(originalDs))
+
+    // Rewrite grouping expressions to use "iv" as input.
+    val updatedGroupingExprs = groupingColumns
+      .filterNot(c => KeyValueGroupedDatasetImpl.containsDummyUDF(c.node))
+      .map(c =>
+        ColumnNodeToProtoConverter.toExprWithTransformation(
+          c.node,
+          encoder = None,
+          rewriteInputColumnHook("iv", ivFields)))
+    // Rewrite aggregate columns to use "v" as input.
+    val updatedAggTypedExprs = columns.map { c =>
+      ColumnNodeToProtoConverter.toExprWithTransformation(
+        c.node,
+        encoder = Some(vEncoder), // Pass the encoder to convert it to a typed column.
+        rewriteInputColumnHook("v", vFields))
+    }
+
+    val rEnc = ProductEncoder.tuple(kEncoder +: columns.map(c => agnosticEncoderFor(c.encoder)))
+    sparkSession.newDataset(rEnc) { builder =>
+      builder.getAggregateBuilder
+        .setInput(valueTransformedDf.plan.getRoot)
+        .setGroupType(proto.Aggregate.GroupType.GROUP_TYPE_GROUPBY)
+        .addAllGroupingExpressions(updatedGroupingExprs.asJava)
+        .addAllAggregateExpressions(updatedAggTypedExprs.asJava)
+    }
+  }
+
+  private def applyValueMapFunc(ds: Dataset[IV]): DataFrame = {
+    require(valueMapFunc.isDefined, "valueMapFunc is not defined")
+
+    val ivIsStruct = ivEncoder.isInstanceOf[StructEncoder[_]]
+    val vIsStruct = vEncoder.isInstanceOf[StructEncoder[_]]
+    val transformEncoder = {
+      val wrappedIvEncoder =
+        (if (ivIsStruct) ivEncoder else ProductEncoder.tuple(Seq(ivEncoder)))
+          .asInstanceOf[AgnosticEncoder[Any]]
+      val wrappedVEncoder =
+        (if (vIsStruct) vEncoder else ProductEncoder.tuple(Seq(vEncoder)))
+          .asInstanceOf[AgnosticEncoder[Any]]
+      ProductEncoder
+        .tuple(Seq(wrappedIvEncoder, wrappedVEncoder))
+        .asInstanceOf[AgnosticEncoder[(Any, Any)]]
+    }
+    val transformFunc = UDFAdaptors.mapValues(valueMapFunc.get, ivIsStruct, vIsStruct)
+    ds.mapPartitions(transformFunc)(transformEncoder).toDF("iv", "v")
+  }
+
+  /**
+   * Given a DF with two struct columns, "iv" and "v", rename the field of "iv" if it
+   * consists of a single field. Also return the column names of "iv" and "v" to avoid
+   * recomputing them later.
+   * @return
+   *   (new DataFrame, column names in IV, column names in V)
+   */
+  private def renameSingleFieldStruct(df: DataFrame): (DataFrame, Seq[String], Seq[String]) = {
+    val ivSchema = df.schema(0).dataType.asInstanceOf[StructType]

Review Comment:
   This is a bit of a no-no from the Connect POV. We should not be fetching the schema here: it triggers an eager analysis round trip to the server, and the schema can still change before we submit the dataset for execution.
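   One possible direction, sketched below without claiming it is the final design: the client already holds `ivEncoder` and `vEncoder` as `AgnosticEncoder`s, so the field names could be derived from the encoders instead of from `df.schema`. This only covers reading the field names; the single-field rename to "key" would still need to be expressed lazily (e.g. as a projection) rather than by inspecting the analyzed schema. The `fields`/`EncoderField` shape below is an assumption based on how the surrounding code already pattern-matches on `StructEncoder`.

       import org.apache.spark.sql.catalyst.encoders.AgnosticEncoder
       import org.apache.spark.sql.catalyst.encoders.AgnosticEncoders.StructEncoder

       // Sketch only: derive struct field names from an encoder the client
       // already holds, instead of calling df.schema (an eager AnalyzePlan
       // round trip in Connect). The "_1" fallback mirrors how
       // applyValueMapFunc wraps non-struct encoders in a single-field tuple.
       private def fieldNamesOf(enc: AgnosticEncoder[_]): Seq[String] = enc match {
         case s: StructEncoder[_] => s.fields.map(_.name)
         case _ => Seq("_1")
       }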



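   For context, a hypothetical end-to-end use of the code path under review (the case classes and the `spark` session are illustrative, not from the PR): `groupByKey` followed by `mapValues` and an untyped `agg` is what routes through the new `aggUntypedWithValueMapFunc` on the Connect client.

       import org.apache.spark.sql.functions.sum

       // Illustrative only; `spark` is assumed to be a Spark Connect session.
       import spark.implicits._

       case class Sale(shop: String, amount: Long)
       case class Net(net: Long)

       val ds = spark.createDataset(Seq(Sale("a", 1L), Sale("a", 2L), Sale("b", 3L)))
       val totals = ds
         .groupByKey(_.shop)             // K = String
         .mapValues(s => Net(s.amount))  // V is a single-field struct
         .agg(sum("net").as[Long])       // rewritten to read from the "v" struct
       // totals: Dataset[(String, Long)]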