dajac commented on a change in pull request #9526:
URL: https://github.com/apache/kafka/pull/9526#discussion_r518187681



##########
File path: 
clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java
##########
@@ -346,6 +346,10 @@ public ProduceResponse getErrorResponse(int 
throttleTimeMs, Throwable e) {
         return partitionSizes.keySet();
     }
 
+    public Map<TopicPartition, Integer> partitionSizes() {
+        return partitionSizes;
+    }

Review comment:
       Is this one still used? It seems that it is not the case.

##########
File path: 
generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java
##########
@@ -227,9 +227,13 @@ private void generateVariableLengthTargetFromJson(Target 
target, Versions curVer
             headerGenerator.addImport(MessageGenerator.MESSAGE_UTIL_CLASS);
             headerGenerator.addImport(MessageGenerator.BYTE_BUFFER_CLASS);
             headerGenerator.addImport(MessageGenerator.MEMORY_RECORDS_CLASS);
+            buffer.printf("if (_verbose) {%n");
+            buffer.incrementIndent();

Review comment:
   I think that we can safely assume that when a request/response is 
serialized with `verbose` equal to `false`, we are not going to deserialize 
it. Therefore, I suggest dropping the handling of `verbose` on the read path.

##########
File path: core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala
##########
@@ -39,7 +40,7 @@ class TestRaftRequestHandler(
 
   override def handle(request: RequestChannel.Request): Unit = {
     try {
-      trace(s"Handling request:${request.requestDesc(true)} from connection 
${request.context.connectionId};" +
+      trace(s"Handling 
request:${RequestConvertToJson.requestDesc(request.header, 
request.loggableRequest, true)} from connection 
${request.context.connectionId};" +

Review comment:
       `verbose` should be `false` here (if we keep it).

##########
File path: 
generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java
##########
@@ -261,7 +265,7 @@ private void generateWrite(String className,
                                StructSpec struct,
                                Versions parentVersions) {
         headerGenerator.addImport(MessageGenerator.JSON_NODE_CLASS);
-        buffer.printf("public static JsonNode write(%s _object, short 
_version) {%n",
+        buffer.printf("public static JsonNode write(%s _object, short 
_version, boolean _verbose) {%n",

Review comment:
   Instead of changing the usage of this method everywhere in the code 
base, how about generating an overloaded method which calls this one with 
`verbose=true`? I only expect this one to be used by the request logger at the 
moment, so it is also more convenient.

##########
File path: core/src/main/scala/kafka/network/RequestConvertToJson.scala
##########
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.util
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.{ArrayNode, BinaryNode, DoubleNode, 
IntNode, JsonNodeFactory, LongNode, NullNode, ObjectNode, ShortNode, TextNode}
+import kafka.network.RequestChannel.{Response, Session}
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.ClientInformation
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.record.RecordBatch
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.utils.CollectionUtils
+
+import scala.jdk.CollectionConverters._
+
+object RequestConvertToJson {
+  def request(request: AbstractRequest, verbose: Boolean): JsonNode = {
+    request match {
+      case req: AddOffsetsToTxnRequest => 
AddOffsetsToTxnRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: AddPartitionsToTxnRequest => 
AddPartitionsToTxnRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: AlterClientQuotasRequest => 
AlterClientQuotasRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: AlterConfigsRequest => 
AlterConfigsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterIsrRequest => 
AlterIsrRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterPartitionReassignmentsRequest => 
AlterPartitionReassignmentsRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: AlterReplicaLogDirsRequest => 
AlterReplicaLogDirsRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case res: AlterUserScramCredentialsRequest => 
AlterUserScramCredentialsRequestDataJsonConverter.write(res.data, 
request.version, verbose)
+      case req: ApiVersionsRequest => 
ApiVersionsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: BeginQuorumEpochRequest => 
BeginQuorumEpochRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: ControlledShutdownRequest => 
ControlledShutdownRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: CreateAclsRequest => 
CreateAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: CreateDelegationTokenRequest => 
CreateDelegationTokenRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: CreatePartitionsRequest => 
CreatePartitionsRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: CreateTopicsRequest => 
CreateTopicsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteAclsRequest => 
DeleteAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteGroupsRequest => 
DeleteGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteRecordsRequest => 
DeleteRecordsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteTopicsRequest => 
DeleteTopicsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeAclsRequest => 
DescribeAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeClientQuotasRequest => 
DescribeClientQuotasRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: DescribeConfigsRequest => 
DescribeConfigsRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: DescribeDelegationTokenRequest => 
DescribeDelegationTokenRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: DescribeGroupsRequest => 
DescribeGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeLogDirsRequest => 
DescribeLogDirsRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: DescribeQuorumRequest => 
DescribeQuorumRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case res: DescribeUserScramCredentialsRequest => 
DescribeUserScramCredentialsRequestDataJsonConverter.write(res.data, 
request.version, verbose)
+      case req: ElectLeadersRequest => 
ElectLeadersRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: EndTxnRequest => 
EndTxnRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: EndQuorumEpochRequest => 
EndQuorumEpochRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ExpireDelegationTokenRequest => 
ExpireDelegationTokenRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: FetchRequest => FetchRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: FindCoordinatorRequest => 
FindCoordinatorRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: HeartbeatRequest => 
HeartbeatRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: IncrementalAlterConfigsRequest => 
IncrementalAlterConfigsRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: InitProducerIdRequest => 
InitProducerIdRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: JoinGroupRequest => 
JoinGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: LeaderAndIsrRequest => 
LeaderAndIsrRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: LeaveGroupRequest => 
LeaveGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListGroupsRequest => 
ListGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListOffsetRequest => 
ListOffsetRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListPartitionReassignmentsRequest => 
ListPartitionReassignmentsRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: MetadataRequest => 
MetadataRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetCommitRequest => 
OffsetCommitRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetDeleteRequest => 
OffsetDeleteRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetFetchRequest => 
OffsetFetchRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetsForLeaderEpochRequest => 
offsetsForLeaderEpochRequestNode(req, request.version, verbose)
+      case req: ProduceRequest => produceRequestNode(req, request.version, 
verbose)
+      case req: RenewDelegationTokenRequest => 
RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: SaslAuthenticateRequest => 
SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: SaslHandshakeRequest => 
SaslHandshakeRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: StopReplicaRequest => 
StopReplicaRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: SyncGroupRequest => 
SyncGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: TxnOffsetCommitRequest => 
TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case req: UpdateFeaturesRequest => 
UpdateFeaturesRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: UpdateMetadataRequest => 
UpdateMetadataRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: VoteRequest => VoteRequestDataJsonConverter.write(req.data, 
request.version, verbose)
+      case req: WriteTxnMarkersRequest => 
WriteTxnMarkersRequestDataJsonConverter.write(req.data, request.version, 
verbose)
+      case _ => throw new IllegalStateException(s"ApiKey ${request.api} is not 
currently handled in `request`, the " +
+        "code should be updated to do so.");
+    }
+  }
+
+  def response(response: AbstractResponse, version: Short, verbose: Boolean): 
JsonNode = {
+    response match {
+      case res: AddOffsetsToTxnResponse => 
AddOffsetsToTxnResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AddPartitionsToTxnResponse => 
AddPartitionsToTxnResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AlterClientQuotasResponse => 
AlterClientQuotasResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AlterConfigsResponse => 
AlterConfigsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AlterIsrResponse => 
AlterIsrResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AlterPartitionReassignmentsResponse => 
AlterPartitionReassignmentsResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: AlterReplicaLogDirsResponse => 
AlterReplicaLogDirsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: AlterUserScramCredentialsResponse => 
AlterUserScramCredentialsResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: ApiVersionsResponse => 
ApiVersionsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: BeginQuorumEpochResponse => 
BeginQuorumEpochResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: ControlledShutdownResponse => 
ControlledShutdownResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: CreateAclsResponse => 
CreateAclsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: CreateDelegationTokenResponse => 
CreateDelegationTokenResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: CreatePartitionsResponse => 
CreatePartitionsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: CreateTopicsResponse => 
CreateTopicsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DeleteAclsResponse => 
DeleteAclsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DeleteGroupsResponse => 
DeleteGroupsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DeleteRecordsResponse => 
DeleteRecordsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DeleteTopicsResponse => 
DeleteTopicsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeAclsResponse => 
DescribeAclsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeClientQuotasResponse => 
DescribeClientQuotasResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeConfigsResponse => 
DescribeConfigsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeDelegationTokenResponse => 
DescribeDelegationTokenResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: DescribeGroupsResponse => 
DescribeGroupsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeLogDirsResponse => 
DescribeLogDirsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeQuorumResponse => 
DescribeQuorumResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: DescribeUserScramCredentialsResponse => 
DescribeUserScramCredentialsResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: ElectLeadersResponse => 
ElectLeadersResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: EndTxnResponse => 
EndTxnResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: EndQuorumEpochResponse => 
EndQuorumEpochResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: ExpireDelegationTokenResponse => 
ExpireDelegationTokenResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: FetchResponse[_] => 
FetchResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: FindCoordinatorResponse => 
FindCoordinatorResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: HeartbeatResponse => 
HeartbeatResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: IncrementalAlterConfigsResponse => 
IncrementalAlterConfigsResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: InitProducerIdResponse => 
InitProducerIdResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: JoinGroupResponse => 
JoinGroupResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: LeaderAndIsrResponse => 
LeaderAndIsrResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: LeaveGroupResponse => 
LeaveGroupResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: ListGroupsResponse => 
ListGroupsResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: ListOffsetResponse => 
ListOffsetResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: ListPartitionReassignmentsResponse => 
ListPartitionReassignmentsResponseDataJsonConverter.write(res.data, version, 
verbose)
+      case res: MetadataResponse => 
MetadataResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: OffsetCommitResponse => 
OffsetCommitResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: OffsetDeleteResponse => 
OffsetDeleteResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: OffsetFetchResponse => 
OffsetFetchResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: OffsetsForLeaderEpochResponse => 
offsetsForLeaderEpochResponseNode(res, version, verbose)
+      case res: ProduceResponse => produceResponseNode(res, version, verbose)
+      case res: RenewDelegationTokenResponse => 
RenewDelegationTokenResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: SaslAuthenticateResponse => 
SaslAuthenticateResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: SaslHandshakeResponse => 
SaslHandshakeResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: StopReplicaResponse => 
StopReplicaResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: SyncGroupResponse => 
SyncGroupResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: TxnOffsetCommitResponse => 
TxnOffsetCommitResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: UpdateFeaturesResponse => 
UpdateFeaturesResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: UpdateMetadataResponse => 
UpdateMetadataResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: WriteTxnMarkersResponse => 
WriteTxnMarkersResponseDataJsonConverter.write(res.data, version, verbose)
+      case res: VoteResponse => VoteResponseDataJsonConverter.write(res.data, 
version, verbose)
+      case _ => throw new IllegalStateException(s"ApiKey $response is not 
currently handled in `response`, the " +
+        "code should be updated to do so.");
+    }
+  }
+
+  def requestHeaderNode(header: RequestHeader, verbose: Boolean): JsonNode = {
+    val node = RequestHeaderDataJsonConverter.write(header.data(), 
header.headerVersion(), verbose).asInstanceOf[ObjectNode]
+    node.set("requestApiKeyName", new TextNode(header.apiKey.toString))
+    node
+  }
+
+  def clientInfoNode(clientInfo: ClientInformation): JsonNode = {
+    val node = new ObjectNode(JsonNodeFactory.instance)
+    node.set("softwareName", new TextNode(clientInfo.softwareName()))
+    node.set("softwareVersion", new TextNode(clientInfo.softwareVersion()))
+    node
+  }
+
+  def requestDescMetrics(header: RequestHeader, res: Response, req: 
AbstractRequest,

Review comment:
       Could we add a unit test for this method?

##########
File path: 
generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java
##########
@@ -380,8 +384,18 @@ private void generateVariableLengthTargetToJson(Target 
target, Versions versions
                         target.sourceVariable(), target.sourceVariable())));
             }
         } else if (target.field().type().isRecords()) {
+            headerGenerator.addImport(MessageGenerator.INT_NODE_CLASS);
             headerGenerator.addImport(MessageGenerator.BINARY_NODE_CLASS);
+            buffer.printf("if (_verbose) {%n");

Review comment:
   I wonder if we could find a better name than `verbose`. Perhaps we 
could be more explicit and use something like `serializeRecords`, as we actually 
only use it for this at the moment. What do you think?

##########
File path: core/src/main/scala/kafka/server/KafkaApis.scala
##########
@@ -3354,7 +3354,7 @@ class KafkaApis(val requestChannel: RequestChannel,
       case Some(response) =>
         val responseSend = request.context.buildResponse(response)
         val responseString =
-          if (RequestChannel.isRequestLoggingEnabled) 
Some(response.toString(request.context.apiVersion))
+          if (RequestChannel.isRequestLoggingEnabled) 
Some(RequestConvertToJson.response(response, request.context.apiVersion, true))

Review comment:
       I think that we should set this to `false`.

##########
File path: core/src/main/scala/kafka/server/KafkaApis.scala
##########
@@ -131,7 +131,7 @@ class KafkaApis(val requestChannel: RequestChannel,
    */
   override def handle(request: RequestChannel.Request): Unit = {
     try {
-      trace(s"Handling request:${request.requestDesc(true)} from connection 
${request.context.connectionId};" +
+      trace(s"Handling 
request:${RequestConvertToJson.requestDesc(request.header, 
request.loggableRequest, true).toString} from connection 
${request.context.connectionId};" +

Review comment:
       `verbose` should be `false` here (if we keep it).

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.message._
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{CompressionType, MemoryRecords, 
RecordBatch, TimestampType}
+import 
org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = 0
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using 
ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new 
util.HashMap[TopicPartition, MemoryRecords]()).build()
+      } else {
+        val struct = 
ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }
+      try {
+        RequestConvertToJson.request(req, false)
+      } catch {
+        case _ : AssertionError => unhandledKeys += key.toString

Review comment:
       `IllegalStateException`?

##########
File path: core/src/main/scala/kafka/network/RequestConvertToJson.scala
##########
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.util
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.{ArrayNode, BinaryNode, DoubleNode, 
IntNode, JsonNodeFactory, LongNode, NullNode, ObjectNode, ShortNode, TextNode}
+import kafka.network.RequestChannel.{Response, Session}
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.ClientInformation
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.record.RecordBatch
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.utils.CollectionUtils
+
+import scala.jdk.CollectionConverters._
+
+object RequestConvertToJson {
+  def request(request: AbstractRequest, verbose: Boolean): JsonNode = {

Review comment:
       I think that we can remove this `verbose` flag here and only set it to 
false for both the produce request and the fetch response. I don't think that 
we will ever want to print out the bytes in the request log. Previously, we 
were printing out different things based on the flag but we never printed out 
the bytes: 
https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java#L314.

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.message._
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{CompressionType, MemoryRecords, 
RecordBatch, TimestampType}
+import 
org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = 0
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using 
ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new 
util.HashMap[TopicPartition, MemoryRecords]()).build()
+      } else {
+        val struct = 
ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }
+      try {
+        RequestConvertToJson.request(req, false)
+      } catch {
+        case _ : AssertionError => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled request keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testAllResponseTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = 0
+      val struct = 
ApiMessageType.fromApiKey(key.id).newResponse().toStruct(version)
+      val res = AbstractResponse.parseResponse(key, struct, version)
+      try {
+        RequestConvertToJson.response(res, version, false)
+      } catch {
+        case _ : AssertionError => unhandledKeys += key.toString

Review comment:
       `IllegalStateException`?

##########
File path: 
generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java
##########
@@ -380,8 +384,18 @@ private void generateVariableLengthTargetToJson(Target 
target, Versions versions
                         target.sourceVariable(), target.sourceVariable())));
             }
         } else if (target.field().type().isRecords()) {
+            headerGenerator.addImport(MessageGenerator.INT_NODE_CLASS);
             headerGenerator.addImport(MessageGenerator.BINARY_NODE_CLASS);
+            buffer.printf("if (_verbose) {%n");

Review comment:
       Could we also add a comment here which explains why we are doing this? 
We can also add the KIP number.

##########
File path: core/src/main/scala/kafka/network/RequestConvertToJson.scala
##########
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.util
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.{ArrayNode, BinaryNode, DoubleNode, 
IntNode, JsonNodeFactory, LongNode, NullNode, ObjectNode, ShortNode, TextNode}
+import kafka.network.RequestChannel.{Response, Session}
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.ClientInformation
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.record.RecordBatch
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.utils.CollectionUtils
+
+import scala.jdk.CollectionConverters._
+
+object RequestConvertToJson {
  /**
   * Converts a request into a JSON node by dispatching on the concrete request
   * type and delegating to the generated JSON converter for its data and
   * version. OffsetsForLeaderEpoch and Produce do not use the generated
   * schemas yet and are handled by the hand-written builders below.
   *
   * @param request the request to render as JSON
   * @param verbose NOTE(review): declared but never read in this method — it
   *                is not forwarded to `produceRequestNode`, although it looks
   *                like it was intended to control whether record payloads are
   *                serialized. Confirm and either wire it through or drop it.
   * @throws AssertionError if the request type has no converter registered
   *                        here (i.e. a new ApiKey was added without updating
   *                        this match)
   */
  def request(request: AbstractRequest, verbose: Boolean): JsonNode = {
    request match {
      case req: AddOffsetsToTxnRequest => AddOffsetsToTxnRequestDataJsonConverter.write(req.data, request.version)
      case req: AddPartitionsToTxnRequest => AddPartitionsToTxnRequestDataJsonConverter.write(req.data, request.version)
      case req: AlterClientQuotasRequest => AlterClientQuotasRequestDataJsonConverter.write(req.data, request.version)
      case req: AlterConfigsRequest => AlterConfigsRequestDataJsonConverter.write(req.data, request.version)
      case req: AlterIsrRequest => AlterIsrRequestDataJsonConverter.write(req.data, request.version)
      case req: AlterPartitionReassignmentsRequest => AlterPartitionReassignmentsRequestDataJsonConverter.write(req.data, request.version)
      case req: AlterReplicaLogDirsRequest => AlterReplicaLogDirsRequestDataJsonConverter.write(req.data, request.version)
      // NOTE(review): binder named `res` unlike the surrounding request cases.
      case res: AlterUserScramCredentialsRequest => AlterUserScramCredentialsRequestDataJsonConverter.write(res.data, request.version)
      case req: ApiVersionsRequest => ApiVersionsRequestDataJsonConverter.write(req.data, request.version)
      case req: BeginQuorumEpochRequest => BeginQuorumEpochRequestDataJsonConverter.write(req.data, request.version)
      case req: ControlledShutdownRequest => ControlledShutdownRequestDataJsonConverter.write(req.data, request.version)
      case req: CreateAclsRequest => CreateAclsRequestDataJsonConverter.write(req.data, request.version)
      case req: CreateDelegationTokenRequest => CreateDelegationTokenRequestDataJsonConverter.write(req.data, request.version)
      case req: CreatePartitionsRequest => CreatePartitionsRequestDataJsonConverter.write(req.data, request.version)
      case req: CreateTopicsRequest => CreateTopicsRequestDataJsonConverter.write(req.data, request.version)
      case req: DeleteAclsRequest => DeleteAclsRequestDataJsonConverter.write(req.data, request.version)
      case req: DeleteGroupsRequest => DeleteGroupsRequestDataJsonConverter.write(req.data, request.version)
      case req: DeleteRecordsRequest => DeleteRecordsRequestDataJsonConverter.write(req.data, request.version)
      case req: DeleteTopicsRequest => DeleteTopicsRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeAclsRequest => DescribeAclsRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeClientQuotasRequest => DescribeClientQuotasRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeConfigsRequest => DescribeConfigsRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeDelegationTokenRequest => DescribeDelegationTokenRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeGroupsRequest => DescribeGroupsRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeLogDirsRequest => DescribeLogDirsRequestDataJsonConverter.write(req.data, request.version)
      case req: DescribeQuorumRequest => DescribeQuorumRequestDataJsonConverter.write(req.data, request.version)
      // NOTE(review): binder named `res` unlike the surrounding request cases.
      case res: DescribeUserScramCredentialsRequest => DescribeUserScramCredentialsRequestDataJsonConverter.write(res.data, request.version)
      case req: ElectLeadersRequest => ElectLeadersRequestDataJsonConverter.write(req.data, request.version)
      case req: EndTxnRequest => EndTxnRequestDataJsonConverter.write(req.data, request.version)
      case req: EndQuorumEpochRequest => EndQuorumEpochRequestDataJsonConverter.write(req.data, request.version)
      case req: ExpireDelegationTokenRequest => ExpireDelegationTokenRequestDataJsonConverter.write(req.data, request.version)
      case req: FetchRequest => FetchRequestDataJsonConverter.write(req.data, request.version)
      case req: FindCoordinatorRequest => FindCoordinatorRequestDataJsonConverter.write(req.data, request.version)
      case req: HeartbeatRequest => HeartbeatRequestDataJsonConverter.write(req.data, request.version)
      case req: IncrementalAlterConfigsRequest => IncrementalAlterConfigsRequestDataJsonConverter.write(req.data, request.version)
      case req: InitProducerIdRequest => InitProducerIdRequestDataJsonConverter.write(req.data, request.version)
      case req: JoinGroupRequest => JoinGroupRequestDataJsonConverter.write(req.data, request.version)
      case req: LeaderAndIsrRequest => LeaderAndIsrRequestDataJsonConverter.write(req.data, request.version)
      case req: LeaveGroupRequest => LeaveGroupRequestDataJsonConverter.write(req.data, request.version)
      case req: ListGroupsRequest => ListGroupsRequestDataJsonConverter.write(req.data, request.version)
      case req: ListOffsetRequest => ListOffsetRequestDataJsonConverter.write(req.data, request.version)
      case req: ListPartitionReassignmentsRequest => ListPartitionReassignmentsRequestDataJsonConverter.write(req.data, request.version)
      case req: MetadataRequest => MetadataRequestDataJsonConverter.write(req.data, request.version)
      case req: OffsetCommitRequest => OffsetCommitRequestDataJsonConverter.write(req.data, request.version)
      case req: OffsetDeleteRequest => OffsetDeleteRequestDataJsonConverter.write(req.data, request.version)
      case req: OffsetFetchRequest => OffsetFetchRequestDataJsonConverter.write(req.data, request.version)
      // Hand-written builder until this API adopts the generated schemas.
      case req: OffsetsForLeaderEpochRequest => offsetsForLeaderEpochRequestNode(req, request.version)
      // Hand-written builder until ProduceRequest starts using ProduceRequestData.
      case req: ProduceRequest => produceRequestNode(req, request.version)
      case req: RenewDelegationTokenRequest => RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version)
      case req: SaslAuthenticateRequest => SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version)
      case req: SaslHandshakeRequest => SaslHandshakeRequestDataJsonConverter.write(req.data, request.version)
      case req: StopReplicaRequest => StopReplicaRequestDataJsonConverter.write(req.data, request.version)
      case req: SyncGroupRequest => SyncGroupRequestDataJsonConverter.write(req.data, request.version)
      case req: TxnOffsetCommitRequest => TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version)
      case req: UpdateFeaturesRequest => UpdateFeaturesRequestDataJsonConverter.write(req.data, request.version)
      case req: UpdateMetadataRequest => UpdateMetadataRequestDataJsonConverter.write(req.data, request.version)
      case req: VoteRequest => VoteRequestDataJsonConverter.write(req.data, request.version)
      case req: WriteTxnMarkersRequest => WriteTxnMarkersRequestDataJsonConverter.write(req.data, request.version)
      // NOTE(review): consider IllegalStateException instead of AssertionError,
      // as suggested in review; AssertionError is unusual for this situation.
      case _ => throw new AssertionError(s"ApiKey ${request.api} is not currently handled in `request`, the " +
        "code should be updated to do so.");
    }
  }
+
  /**
   * Converts a response into a JSON node by dispatching on the concrete
   * response type and delegating to the generated JSON converter for its data.
   * OffsetsForLeaderEpoch and Produce do not use the generated schemas yet and
   * are handled by the hand-written builders below.
   *
   * NOTE(review): unlike `request`, this method takes no `verbose` flag, yet
   * the test at the top of this change calls it with three arguments
   * (`response(res, version, false)`) — confirm the intended signature.
   *
   * @param response the response to render as JSON
   * @param version  the API version with which the response was serialized
   * @throws AssertionError if the response type has no converter registered
   *                        here (i.e. a new ApiKey was added without updating
   *                        this match)
   */
  def response(response: AbstractResponse, version: Short): JsonNode = {
    response match {
      case res: AddOffsetsToTxnResponse => AddOffsetsToTxnResponseDataJsonConverter.write(res.data, version)
      case res: AddPartitionsToTxnResponse => AddPartitionsToTxnResponseDataJsonConverter.write(res.data, version)
      case res: AlterClientQuotasResponse => AlterClientQuotasResponseDataJsonConverter.write(res.data, version)
      case res: AlterConfigsResponse => AlterConfigsResponseDataJsonConverter.write(res.data, version)
      case res: AlterIsrResponse => AlterIsrResponseDataJsonConverter.write(res.data, version)
      case res: AlterPartitionReassignmentsResponse => AlterPartitionReassignmentsResponseDataJsonConverter.write(res.data, version)
      case res: AlterReplicaLogDirsResponse => AlterReplicaLogDirsResponseDataJsonConverter.write(res.data, version)
      case res: AlterUserScramCredentialsResponse => AlterUserScramCredentialsResponseDataJsonConverter.write(res.data, version)
      case res: ApiVersionsResponse => ApiVersionsResponseDataJsonConverter.write(res.data, version)
      case res: BeginQuorumEpochResponse => BeginQuorumEpochResponseDataJsonConverter.write(res.data, version)
      case res: ControlledShutdownResponse => ControlledShutdownResponseDataJsonConverter.write(res.data, version)
      case res: CreateAclsResponse => CreateAclsResponseDataJsonConverter.write(res.data, version)
      case res: CreateDelegationTokenResponse => CreateDelegationTokenResponseDataJsonConverter.write(res.data, version)
      case res: CreatePartitionsResponse => CreatePartitionsResponseDataJsonConverter.write(res.data, version)
      case res: CreateTopicsResponse => CreateTopicsResponseDataJsonConverter.write(res.data, version)
      case res: DeleteAclsResponse => DeleteAclsResponseDataJsonConverter.write(res.data, version)
      case res: DeleteGroupsResponse => DeleteGroupsResponseDataJsonConverter.write(res.data, version)
      case res: DeleteRecordsResponse => DeleteRecordsResponseDataJsonConverter.write(res.data, version)
      case res: DeleteTopicsResponse => DeleteTopicsResponseDataJsonConverter.write(res.data, version)
      case res: DescribeAclsResponse => DescribeAclsResponseDataJsonConverter.write(res.data, version)
      case res: DescribeClientQuotasResponse => DescribeClientQuotasResponseDataJsonConverter.write(res.data, version)
      case res: DescribeConfigsResponse => DescribeConfigsResponseDataJsonConverter.write(res.data, version)
      case res: DescribeDelegationTokenResponse => DescribeDelegationTokenResponseDataJsonConverter.write(res.data, version)
      case res: DescribeGroupsResponse => DescribeGroupsResponseDataJsonConverter.write(res.data, version)
      case res: DescribeLogDirsResponse => DescribeLogDirsResponseDataJsonConverter.write(res.data, version)
      case res: DescribeQuorumResponse => DescribeQuorumResponseDataJsonConverter.write(res.data, version)
      case res: DescribeUserScramCredentialsResponse => DescribeUserScramCredentialsResponseDataJsonConverter.write(res.data, version)
      case res: ElectLeadersResponse => ElectLeadersResponseDataJsonConverter.write(res.data, version)
      case res: EndTxnResponse => EndTxnResponseDataJsonConverter.write(res.data, version)
      case res: EndQuorumEpochResponse => EndQuorumEpochResponseDataJsonConverter.write(res.data, version)
      case res: ExpireDelegationTokenResponse => ExpireDelegationTokenResponseDataJsonConverter.write(res.data, version)
      // The record-set type parameter is erased; any FetchResponse matches here.
      case res: FetchResponse[_] => FetchResponseDataJsonConverter.write(res.data, version)
      case res: FindCoordinatorResponse => FindCoordinatorResponseDataJsonConverter.write(res.data, version)
      case res: HeartbeatResponse => HeartbeatResponseDataJsonConverter.write(res.data, version)
      case res: IncrementalAlterConfigsResponse => IncrementalAlterConfigsResponseDataJsonConverter.write(res.data, version)
      case res: InitProducerIdResponse => InitProducerIdResponseDataJsonConverter.write(res.data, version)
      case res: JoinGroupResponse => JoinGroupResponseDataJsonConverter.write(res.data, version)
      case res: LeaderAndIsrResponse => LeaderAndIsrResponseDataJsonConverter.write(res.data, version)
      case res: LeaveGroupResponse => LeaveGroupResponseDataJsonConverter.write(res.data, version)
      case res: ListGroupsResponse => ListGroupsResponseDataJsonConverter.write(res.data, version)
      case res: ListOffsetResponse => ListOffsetResponseDataJsonConverter.write(res.data, version)
      case res: ListPartitionReassignmentsResponse => ListPartitionReassignmentsResponseDataJsonConverter.write(res.data, version)
      case res: MetadataResponse => MetadataResponseDataJsonConverter.write(res.data, version)
      case res: OffsetCommitResponse => OffsetCommitResponseDataJsonConverter.write(res.data, version)
      case res: OffsetDeleteResponse => OffsetDeleteResponseDataJsonConverter.write(res.data, version)
      case res: OffsetFetchResponse => OffsetFetchResponseDataJsonConverter.write(res.data, version)
      // Hand-written builder until this API adopts the generated schemas.
      case res: OffsetsForLeaderEpochResponse => offsetsForLeaderEpochResponseNode(res, version)
      // Hand-written builder until ProduceResponse adopts the generated schemas.
      case res: ProduceResponse => produceResponseNode(res, version)
      case res: RenewDelegationTokenResponse => RenewDelegationTokenResponseDataJsonConverter.write(res.data, version)
      case res: SaslAuthenticateResponse => SaslAuthenticateResponseDataJsonConverter.write(res.data, version)
      case res: SaslHandshakeResponse => SaslHandshakeResponseDataJsonConverter.write(res.data, version)
      case res: StopReplicaResponse => StopReplicaResponseDataJsonConverter.write(res.data, version)
      case res: SyncGroupResponse => SyncGroupResponseDataJsonConverter.write(res.data, version)
      case res: TxnOffsetCommitResponse => TxnOffsetCommitResponseDataJsonConverter.write(res.data, version)
      case res: UpdateFeaturesResponse => UpdateFeaturesResponseDataJsonConverter.write(res.data, version)
      case res: UpdateMetadataResponse => UpdateMetadataResponseDataJsonConverter.write(res.data, version)
      // NOTE(review): the next two cases break the otherwise-alphabetical order.
      case res: WriteTxnMarkersResponse => WriteTxnMarkersResponseDataJsonConverter.write(res.data, version)
      case res: VoteResponse => VoteResponseDataJsonConverter.write(res.data, version)
      // NOTE(review): this message interpolates the whole response object rather
      // than its API key (compare `request`, which uses `request.api`); also
      // consider IllegalStateException instead of AssertionError.
      case _ => throw new AssertionError(s"ApiKey $response is not currently handled in `response`, the " +
        "code should be updated to do so.");
    }
  }
+
+  def requestHeaderNode(header: RequestHeader): JsonNode = {
+    val node = RequestHeaderDataJsonConverter.write(header.data(), 
header.headerVersion()).asInstanceOf[ObjectNode]
+    node.set("requestApiKeyName", new TextNode(header.apiKey.toString))
+    node
+  }
+
+  def clientInfoNode(clientInfo: ClientInformation): JsonNode = {
+    val node = new ObjectNode(JsonNodeFactory.instance)
+    node.set("softwareName", new TextNode(clientInfo.softwareName()))
+    node.set("softwareName", new TextNode(clientInfo.softwareVersion()))
+    node
+  }
+
  /**
   * Builds the full JSON description logged for a completed request: header,
   * request body, response log, connection metadata, and per-stage timing
   * metrics (queue, local/remote handling, throttle, response send).
   *
   * @param header          the request header
   * @param res             the completed response; its `responseLog` is
   *                        embedded under "response" (empty string if absent)
   * @param req             the request body, serialized via `request`
   * @param context         connection/security/listener metadata
   * @param session         authenticated principal for the connection
   * @param verbose         forwarded to `request` (see the note there)
   * @param apiThrottleTimeMs throttle time; a Long, unlike the other Double
   *                          timing values
   */
  def requestDescMetrics(header: RequestHeader, res: Response, req: AbstractRequest,
                         context: RequestContext, session: Session, verbose: Boolean,
                         totalTimeMs: Double, requestQueueTimeMs: Double, apiLocalTimeMs: Double,
                         apiRemoteTimeMs: Double, apiThrottleTimeMs: Long, responseQueueTimeMs: Double,
                         responseSendTimeMs: Double, temporaryMemoryBytes: Long,
                         messageConversionsTimeMs: Double): JsonNode = {
    val node = new ObjectNode(JsonNodeFactory.instance)
    node.set("requestHeader", requestHeaderNode(header))
    node.set("request", request(req, verbose))
    // Falls back to an empty string when no response log was captured.
    node.set("response", res.responseLog.getOrElse(new TextNode("")))
    node.set("connection", new TextNode(context.connectionId))
    node.set("totalTimeMs", new DoubleNode(totalTimeMs))
    node.set("requestQueueTimeMs", new DoubleNode(requestQueueTimeMs))
    node.set("localTimeMs", new DoubleNode(apiLocalTimeMs))
    node.set("remoteTimeMs", new DoubleNode(apiRemoteTimeMs))
    node.set("throttleTimeMs", new LongNode(apiThrottleTimeMs))
    node.set("responseQueueTimeMs", new DoubleNode(responseQueueTimeMs))
    node.set("sendTimeMs", new DoubleNode(responseSendTimeMs))
    node.set("securityProtocol", new TextNode(context.securityProtocol.toString))
    node.set("principal", new TextNode(session.principal.toString))
    node.set("listener", new TextNode(context.listenerName.value))
    node.set("clientInformation", clientInfoNode(context.clientInformation))
    // The last two fields are only included when they carry information,
    // matching the behavior of the legacy request-log string.
    if (temporaryMemoryBytes > 0)
      node.set("temporaryMemoryBytes", new LongNode(temporaryMemoryBytes))
    if (messageConversionsTimeMs > 0)
      node.set("messageConversionsTime", new DoubleNode(messageConversionsTimeMs))
    node
  }
+
+  def requestDesc(header: RequestHeader, req: AbstractRequest, verbose: 
Boolean): JsonNode = {
+    val node = new ObjectNode(JsonNodeFactory.instance)
+    node.set("requestHeader", requestHeaderNode(header))
+    node.set("request", request(req, verbose))
+    node
+  }
+
  /**
   * Temporary until switch to use the generated schemas.
   *
   * Hand-written JSON rendering of an OffsetsForLeaderEpoch request,
   * mirroring the field layout the generated converter would produce.
   * Version-gated fields: "replicaId" (v3+) and "currentLeaderEpoch" (v2+).
   */
  def offsetsForLeaderEpochRequestNode(request: OffsetsForLeaderEpochRequest, version: Short): JsonNode = {
    val node = new ObjectNode(JsonNodeFactory.instance)
    if (version >= 3) {
      node.set("replicaId", new IntNode(request.replicaId))
    }
    // Regroup the flat per-partition map into the topic -> partitions shape
    // used by the wire-format JSON.
    val topics = CollectionUtils.groupPartitionDataByTopic(request.epochsByTopicPartition)
    val topicsArray = new ArrayNode(JsonNodeFactory.instance)
    topics.forEach { (topicName, partitions) =>
      val topicNode = new ObjectNode(JsonNodeFactory.instance)
      topicNode.set("name", new TextNode(topicName))
      val partitionsArray = new ArrayNode(JsonNodeFactory.instance)
      partitions.forEach { (partitionIndex, partitionData) =>
        val partitionNode = new ObjectNode(JsonNodeFactory.instance)
        partitionNode.set("partitionIndex", new IntNode(partitionIndex))
        if (version >= 2) {
          // An absent epoch is rendered as the NO_PARTITION_LEADER_EPOCH sentinel.
          val leaderEpoch = partitionData.currentLeaderEpoch
          partitionNode.set("currentLeaderEpoch", new IntNode(leaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)))
        }
        partitionNode.set("leaderEpoch", new IntNode(partitionData.leaderEpoch))
        partitionsArray.add(partitionNode)
      }
      topicNode.set("partitions", partitionsArray)
      topicsArray.add(topicNode)
    }
    node.set("topics", topicsArray)
    node
  }
+
+  /**
+   * Temporary until switch to use the generated schemas.
+   */
+  def produceRequestNode(request: ProduceRequest, version: Short): JsonNode = {
+    val node = new ObjectNode(JsonNodeFactory.instance)
+    if (version >= 3) {
+      if (request.transactionalId == null) {
+        node.set("transactionalId", NullNode.instance)
+      } else {
+        node.set("transactionalId", new TextNode(request.transactionalId))
+      }
+    }
+    node.set("acks", new ShortNode(request.acks))
+    node.set("timeoutMs", new IntNode(request.timeout))
+    val topics = 
CollectionUtils.groupPartitionDataByTopic(request.partitionRecordsOrFail())
+    val topicsArray = new ArrayNode(JsonNodeFactory.instance)
+    topics.forEach { (topicName, partitions) =>
+      val topicNode = new ObjectNode(JsonNodeFactory.instance)
+      topicNode.set("name", new TextNode(topicName))
+      val partitionsArray = new ArrayNode(JsonNodeFactory.instance)
+      partitions.forEach { (partitionIndex, partitionData)=>
+        val partitionNode = new ObjectNode(JsonNodeFactory.instance)
+        partitionNode.set("partitionIndex", new IntNode(partitionIndex))
+        if (partitionData == null)
+          partitionNode.set("records", NullNode.instance)
+        else
+          partitionNode.set("records", new 
BinaryNode(util.Arrays.copyOf(partitionData.buffer().array(), 
partitionData.validBytes())))

Review comment:
       Right. I think that we will change it to the `records` type when we 
migrate the produce request. In any case, we should not serialize the record 
bytes to JSON here, but rather emit their size. We should retain the size of 
the records set for both the produce request and the fetch response.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to