dajac commented on a change in pull request #9526:
URL: https://github.com/apache/kafka/pull/9526#discussion_r520340843



##########
File path: clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java
##########
@@ -394,7 +394,7 @@ private void testRoundTrip(SimpleExampleMessageData message,
         assertEquals(message.hashCode(), messageFromStruct.hashCode());
 
         // Check JSON serialization
-        JsonNode serializedJson = SimpleExampleMessageDataJsonConverter.write(message, version);
+        JsonNode serializedJson = SimpleExampleMessageDataJsonConverter.write(message, version, true);

Review comment:
       It seems that we can revert this now that we have the overloaded method.

##########
File path: core/src/main/scala/kafka/network/RequestConvertToJson.scala
##########
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.util
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.{ArrayNode, BinaryNode, DoubleNode, IntNode, JsonNodeFactory, LongNode, NullNode, ObjectNode, ShortNode, TextNode}
+import kafka.network.RequestChannel.{Response, Session}
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.ClientInformation
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.record.RecordBatch
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.utils.CollectionUtils
+
+import scala.jdk.CollectionConverters._
+
+object RequestConvertToJson {
+  def request(request: AbstractRequest, verbose: Boolean): JsonNode = {

Review comment:
       It seems that we can remove `verbose` as we don't really use it anymore. Instead, we could set it to `false` for the produce request below.
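   Something like this, as a sketch (keeping the `produceRequestNode` helper from below and hard-coding `false` there; all other cases just drop the argument):
   ```
   def request(request: AbstractRequest): JsonNode = {
     request match {
       // ... all other cases unchanged, minus the verbose argument ...
       case req: ProduceRequest => produceRequestNode(req, request.version, false)
     }
   }
   ```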

##########
File path: core/src/main/scala/kafka/network/RequestConvertToJson.scala
##########
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.util
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.{ArrayNode, BinaryNode, DoubleNode, IntNode, JsonNodeFactory, LongNode, NullNode, ObjectNode, ShortNode, TextNode}
+import kafka.network.RequestChannel.{Response, Session}
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.ClientInformation
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.record.RecordBatch
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.utils.CollectionUtils
+
+import scala.jdk.CollectionConverters._
+
+object RequestConvertToJson {
+  def request(request: AbstractRequest, verbose: Boolean): JsonNode = {
+    request match {
+      case req: AddOffsetsToTxnRequest => AddOffsetsToTxnRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AddPartitionsToTxnRequest => AddPartitionsToTxnRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterClientQuotasRequest => AlterClientQuotasRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterConfigsRequest => AlterConfigsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterIsrRequest => AlterIsrRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterPartitionReassignmentsRequest => AlterPartitionReassignmentsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: AlterReplicaLogDirsRequest => AlterReplicaLogDirsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case res: AlterUserScramCredentialsRequest => AlterUserScramCredentialsRequestDataJsonConverter.write(res.data, request.version, verbose)
+      case req: ApiVersionsRequest => ApiVersionsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: BeginQuorumEpochRequest => BeginQuorumEpochRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ControlledShutdownRequest => ControlledShutdownRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: CreateAclsRequest => CreateAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: CreateDelegationTokenRequest => CreateDelegationTokenRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: CreatePartitionsRequest => CreatePartitionsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: CreateTopicsRequest => CreateTopicsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteAclsRequest => DeleteAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteGroupsRequest => DeleteGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteRecordsRequest => DeleteRecordsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DeleteTopicsRequest => DeleteTopicsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeAclsRequest => DescribeAclsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeClientQuotasRequest => DescribeClientQuotasRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeConfigsRequest => DescribeConfigsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeDelegationTokenRequest => DescribeDelegationTokenRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeGroupsRequest => DescribeGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeLogDirsRequest => DescribeLogDirsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: DescribeQuorumRequest => DescribeQuorumRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case res: DescribeUserScramCredentialsRequest => DescribeUserScramCredentialsRequestDataJsonConverter.write(res.data, request.version, verbose)
+      case req: ElectLeadersRequest => ElectLeadersRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: EndTxnRequest => EndTxnRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: EndQuorumEpochRequest => EndQuorumEpochRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ExpireDelegationTokenRequest => ExpireDelegationTokenRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: FetchRequest => FetchRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: FindCoordinatorRequest => FindCoordinatorRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: HeartbeatRequest => HeartbeatRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: IncrementalAlterConfigsRequest => IncrementalAlterConfigsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: InitProducerIdRequest => InitProducerIdRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: JoinGroupRequest => JoinGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: LeaderAndIsrRequest => LeaderAndIsrRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: LeaveGroupRequest => LeaveGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListGroupsRequest => ListGroupsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListOffsetRequest => ListOffsetRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: ListPartitionReassignmentsRequest => ListPartitionReassignmentsRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: MetadataRequest => MetadataRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetCommitRequest => OffsetCommitRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetDeleteRequest => OffsetDeleteRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetFetchRequest => OffsetFetchRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: OffsetsForLeaderEpochRequest => offsetsForLeaderEpochRequestNode(req, request.version, verbose)
+      case req: ProduceRequest => produceRequestNode(req, request.version, verbose)
+      case req: RenewDelegationTokenRequest => RenewDelegationTokenRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: SaslAuthenticateRequest => SaslAuthenticateRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: SaslHandshakeRequest => SaslHandshakeRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: StopReplicaRequest => StopReplicaRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: SyncGroupRequest => SyncGroupRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: TxnOffsetCommitRequest => TxnOffsetCommitRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: UpdateFeaturesRequest => UpdateFeaturesRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: UpdateMetadataRequest => UpdateMetadataRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: VoteRequest => VoteRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case req: WriteTxnMarkersRequest => WriteTxnMarkersRequestDataJsonConverter.write(req.data, request.version, verbose)
+      case _ => throw new IllegalStateException(s"ApiKey ${request.api} is not currently handled in `request`, the " +
+        "code should be updated to do so.");
+    }
+  }
+
+  def response(response: AbstractResponse, version: Short, verbose: Boolean): JsonNode = {

Review comment:
       Same here. We can remove `verbose` and set it to `false` for the fetch response.
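   E.g., as a sketch (assuming a fetch-specific helper analogous to `produceRequestNode`, here called `fetchResponseNode`):
   ```
   def response(response: AbstractResponse, version: Short): JsonNode = {
     response match {
       // ... all other cases unchanged, minus the verbose argument ...
       case res: FetchResponse[_] => fetchResponseNode(res, version, false)
     }
   }
   ```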

##########
File path: core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala
##########
@@ -93,10 +94,8 @@ class TestRaftRequestHandler(
     val response = responseOpt match {
       case Some(response) =>
         val responseSend = request.context.buildResponse(response)
-        val responseString =
-          if (RequestChannel.isRequestLoggingEnabled) Some(response.toString(request.context.apiVersion))
-          else None
-        new RequestChannel.SendResponse(request, responseSend, responseString, None)
+        val headerLog = RequestConvertToJson.requestHeaderNode(request.header)
+        new RequestChannel.SendResponse(request, responseSend, Some(headerLog), None)

Review comment:
       I suppose that we need to keep checking `if (RequestChannel.isRequestLoggingEnabled)` here.
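   Something along these lines, reusing the check from the removed code above (a sketch, not tested):
   ```
   val headerLog =
     if (RequestChannel.isRequestLoggingEnabled)
       Some(RequestConvertToJson.requestHeaderNode(request.header))
     else None
   new RequestChannel.SendResponse(request, responseSend, headerLog, None)
   ```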

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.net.InetAddress
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.ObjectNode
+import kafka.network
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.memory.MemoryPool
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.{ClientInformation, ListenerName, NetworkSend}
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
+import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
+import org.easymock.EasyMock.createNiceMock
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new util.HashMap[TopicPartition, MemoryRecords]).build()
+      } else {
+        val struct = ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }
+      try {
+        RequestConvertToJson.request(req, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled request keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testAllResponseTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      val struct = ApiMessageType.fromApiKey(key.id).newResponse().toStruct(version)
+      val res = AbstractResponse.parseResponse(key, struct, version)
+      try {
+        RequestConvertToJson.response(res, version, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled response keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochRequestNode(): Unit = {
+    val partitionDataMap = new util.HashMap[TopicPartition, PartitionData]
+    partitionDataMap.put(new TopicPartition("topic1", 0), new PartitionData(Optional.of(0), 1))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val request = OffsetsForLeaderEpochRequest.Builder.forConsumer(partitionDataMap).build(version)
+    val actualNode = RequestConvertToJson.request(request, true)
+
+    val requestData = OffsetForLeaderEpochRequestDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochRequestDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceRequestNode(): Unit = {
+    val produceDataMap = new util.HashMap[TopicPartition, MemoryRecords]
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val serializeRecords: Boolean = false;
+    val request = ProduceRequest.Builder.forMagic(2, 0.toShort, 0, produceDataMap, "").build()
+    val actualNode = RequestConvertToJson.request(request, serializeRecords)
+
+    val requestData = new ProduceRequestData()
+    val expectedNode = ProduceRequestDataJsonConverter.write(requestData, version, serializeRecords)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochResponseNode(): Unit = {
+    val endOffsetMap = new util.HashMap[TopicPartition, EpochEndOffset]
+    endOffsetMap.put(new TopicPartition("topic1", 0), new EpochEndOffset(1, 10L))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val response = new OffsetsForLeaderEpochResponse(endOffsetMap)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = OffsetForLeaderEpochResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceResponseNode(): Unit = {
+    val responseData = new util.HashMap[TopicPartition, ProduceResponse.PartitionResponse]
+    val partResponse = new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(3, "Record error")), "Produce failed")
+    responseData.put(new TopicPartition("topic1", 0), partResponse)
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val response = new ProduceResponse(responseData)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = ProduceResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = ProduceResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test def testFieldsRequestDescMetrics(): Unit = {
+    val expectedFields = Set("requestHeader", "request", "response", "connection",
+      "totalTimeMs", "requestQueueTimeMs", "localTimeMs", "remoteTimeMs", "throttleTimeMs",
+      "responseQueueTimeMs", "sendTimeMs", "securityProtocol", "principal", "listener",
+      "clientInformation", "softwareName", "softwareVersion", "temporaryMemoryBytes", "messageConversionsTime")
+
+    val req = request(new AlterIsrRequest(new AlterIsrRequestData(), 0))
+    val byteBuffer = req.body[AbstractRequest].serialize(req.header)
+    val send = new NetworkSend(req.context.connectionId, byteBuffer)
+    val headerLog = RequestConvertToJson.requestHeaderNode(req.header)
+    val res = new RequestChannel.SendResponse(req, send, Some(headerLog), None)
+
+    val node = RequestConvertToJson.requestDescMetrics(req.header, res, req.loggableRequest, req.context, req.session,
+      1, 1, 1, 1, 1, 1, 1, 1, 1).asInstanceOf[ObjectNode]
+    val foundFields = getFieldNames(node)

Review comment:
       Thinking a bit more about this: I think it may be worth validating the values as well, to ensure that the input maps to the output. If we use a different value for each field, we could verify that the output JSON object is correct. `JsonNode`s are comparable, so we could construct the expected output and use it in `assertEquals`. What do you think?
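   A rough sketch of what I mean (the field names come from `expectedFields` above; the exact node types, e.g. `DoubleNode`, are assumptions about what `requestDescMetrics` emits):
   ```
   val expected = new ObjectNode(JsonNodeFactory.instance)
   expected.set("requestHeader", headerLog)
   expected.set("totalTimeMs", new DoubleNode(1.0))
   // ... one distinct, known value per remaining field ...
   assertEquals(expected, node)
   ```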

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.net.InetAddress
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.ObjectNode
+import kafka.network
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.memory.MemoryPool
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.{ClientInformation, ListenerName, NetworkSend}
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
+import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
+import org.easymock.EasyMock.createNiceMock
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new util.HashMap[TopicPartition, MemoryRecords]).build()
+      } else {
+        val struct = ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }
+      try {
+        RequestConvertToJson.request(req, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled request keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testAllResponseTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      val struct = ApiMessageType.fromApiKey(key.id).newResponse().toStruct(version)
+      val res = AbstractResponse.parseResponse(key, struct, version)
+      try {
+        RequestConvertToJson.response(res, version, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled response keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochRequestNode(): Unit = {
+    val partitionDataMap = new util.HashMap[TopicPartition, PartitionData]
+    partitionDataMap.put(new TopicPartition("topic1", 0), new PartitionData(Optional.of(0), 1))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val request = OffsetsForLeaderEpochRequest.Builder.forConsumer(partitionDataMap).build(version)
+    val actualNode = RequestConvertToJson.request(request, true)
+
+    val requestData = OffsetForLeaderEpochRequestDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochRequestDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceRequestNode(): Unit = {
+    val produceDataMap = new util.HashMap[TopicPartition, MemoryRecords]
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val serializeRecords: Boolean = false;
+    val request = ProduceRequest.Builder.forMagic(2, 0.toShort, 0, produceDataMap, "").build()
+    val actualNode = RequestConvertToJson.request(request, serializeRecords)
+
+    val requestData = new ProduceRequestData()
+    val expectedNode = ProduceRequestDataJsonConverter.write(requestData, version, serializeRecords)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochResponseNode(): Unit = {
+    val endOffsetMap = new util.HashMap[TopicPartition, EpochEndOffset]
+    endOffsetMap.put(new TopicPartition("topic1", 0), new EpochEndOffset(1, 10L))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val response = new OffsetsForLeaderEpochResponse(endOffsetMap)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = OffsetForLeaderEpochResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceResponseNode(): Unit = {
+    val responseData = new util.HashMap[TopicPartition, ProduceResponse.PartitionResponse]
+    val partResponse = new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(3, "Record error")), "Produce failed")
+    responseData.put(new TopicPartition("topic1", 0), partResponse)
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val response = new ProduceResponse(responseData)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = ProduceResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = ProduceResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test def testFieldsRequestDescMetrics(): Unit = {
+    val expectedFields = Set("requestHeader", "request", "response", "connection",
+      "totalTimeMs", "requestQueueTimeMs", "localTimeMs", "remoteTimeMs", "throttleTimeMs",
+      "responseQueueTimeMs", "sendTimeMs", "securityProtocol", "principal", "listener",
+      "clientInformation", "softwareName", "softwareVersion", "temporaryMemoryBytes", "messageConversionsTime")
+
+    val req = request(new AlterIsrRequest(new AlterIsrRequestData(), 0))
+    val byteBuffer = req.body[AbstractRequest].serialize(req.header)
+    val send = new NetworkSend(req.context.connectionId, byteBuffer)
+    val headerLog = RequestConvertToJson.requestHeaderNode(req.header)
+    val res = new RequestChannel.SendResponse(req, send, Some(headerLog), None)
+
+    val node = RequestConvertToJson.requestDescMetrics(req.header, res, req.loggableRequest, req.context, req.session,
+      1, 1, 1, 1, 1, 1, 1, 1, 1).asInstanceOf[ObjectNode]
+    val foundFields = getFieldNames(node)
+
+    assertEquals(expectedFields, foundFields)
+  }
+
+  def request(req: AbstractRequest): RequestChannel.Request = {

Review comment:
       nit: Could we make it private?
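   i.e.:
   ```
   private def request(req: AbstractRequest): RequestChannel.Request = {
     // ... body unchanged ...
   }
   ```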

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.net.InetAddress
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.ObjectNode
+import kafka.network
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.memory.MemoryPool
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.{ClientInformation, ListenerName, NetworkSend}
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
+import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
+import org.easymock.EasyMock.createNiceMock
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new util.HashMap[TopicPartition, MemoryRecords]).build()
+      } else {
+        val struct = ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }
+      try {
+        RequestConvertToJson.request(req, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled request keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testAllResponseTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      val struct = ApiMessageType.fromApiKey(key.id).newResponse().toStruct(version)
+      val res = AbstractResponse.parseResponse(key, struct, version)
+      try {
+        RequestConvertToJson.response(res, version, false)
+      } catch {
+        case _ : IllegalStateException => unhandledKeys += key.toString
+      }
+    })
+    assertEquals("Unhandled response keys", ArrayBuffer.empty, unhandledKeys)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochRequestNode(): Unit = {
+    val partitionDataMap = new util.HashMap[TopicPartition, PartitionData]
+    partitionDataMap.put(new TopicPartition("topic1", 0), new PartitionData(Optional.of(0), 1))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val request = OffsetsForLeaderEpochRequest.Builder.forConsumer(partitionDataMap).build(version)
+    val actualNode = RequestConvertToJson.request(request, true)
+
+    val requestData = OffsetForLeaderEpochRequestDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochRequestDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceRequestNode(): Unit = {
+    val produceDataMap = new util.HashMap[TopicPartition, MemoryRecords]
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val serializeRecords: Boolean = false;
+    val request = ProduceRequest.Builder.forMagic(2, 0.toShort, 0, produceDataMap, "").build()
+    val actualNode = RequestConvertToJson.request(request, serializeRecords)
+
+    val requestData = new ProduceRequestData()
+    val expectedNode = ProduceRequestDataJsonConverter.write(requestData, version, serializeRecords)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfOffsetsForLeaderEpochResponseNode(): Unit = {
+    val endOffsetMap = new util.HashMap[TopicPartition, EpochEndOffset]
+    endOffsetMap.put(new TopicPartition("topic1", 0), new EpochEndOffset(1, 10L))
+
+    val version: Short = ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion
+    val response = new OffsetsForLeaderEpochResponse(endOffsetMap)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = OffsetForLeaderEpochResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = OffsetForLeaderEpochResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test
+  def testFormatOfProduceResponseNode(): Unit = {
+    val responseData = new util.HashMap[TopicPartition, ProduceResponse.PartitionResponse]
+    val partResponse = new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(3, "Record error")), "Produce failed")
+    responseData.put(new TopicPartition("topic1", 0), partResponse)
+
+    val version: Short = ApiKeys.PRODUCE.latestVersion
+    val response = new ProduceResponse(responseData)
+    val actualNode = RequestConvertToJson.response(response, version, true)
+
+    val requestData = ProduceResponseDataJsonConverter.read(actualNode, version)
+    val expectedNode = ProduceResponseDataJsonConverter.write(requestData, version, true)
+
+    assertEquals(expectedNode, actualNode)
+  }
+
+  @Test def testFieldsRequestDescMetrics(): Unit = {

Review comment:
       nit: Could we add a new line after `@Test`?
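   i.e.:
   ```
   @Test
   def testFieldsRequestDescMetrics(): Unit = {
   ```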

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.net.InetAddress
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.ObjectNode
+import kafka.network
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.memory.MemoryPool
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.{ClientInformation, ListenerName, NetworkSend}
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
+import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
+import org.easymock.EasyMock.createNiceMock
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {
+      val version: Short = key.latestVersion()
+      var req: AbstractRequest = null
+      if (key == ApiKeys.PRODUCE) {
+        // There's inconsistency with the toStruct schema in ProduceRequest
+        // and ProduceRequestDataJsonConverters where the field names don't
+        // match so the struct does not have the correct field names. This is
+        // a temporary workaround until ProduceRequest starts using ProduceRequestData
+        req = ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000, new util.HashMap[TopicPartition, MemoryRecords]).build()
+      } else {
+        val struct = ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
+        req = AbstractRequest.parseRequest(key, version, struct)
+      }

Review comment:
       nit: We would usually use a `val` here and write the block as follows:
   ```
   val req = if (key == ApiKeys.PRODUCE) {
     ...
   } else {
     ...
   }
   ```
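   Filled in with the bodies from the hunk above, that would be (a sketch, not tested):
   ```
   val req = if (key == ApiKeys.PRODUCE) {
     // temporary workaround until ProduceRequest uses ProduceRequestData
     ProduceRequest.Builder.forCurrentMagic(0.toShort, 10000,
       new util.HashMap[TopicPartition, MemoryRecords]).build()
   } else {
     val struct = ApiMessageType.fromApiKey(key.id).newRequest().toStruct(version)
     AbstractRequest.parseRequest(key, version, struct)
   }
   ```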

##########
File path: core/src/test/scala/unit/kafka/network/RequestConvertToJsonTest.scala
##########
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.network
+
+import java.net.InetAddress
+import java.nio.ByteBuffer
+import java.util
+import java.util.{Collections, Optional}
+
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.node.ObjectNode
+import kafka.network
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.memory.MemoryPool
+import org.apache.kafka.common.message._
+import org.apache.kafka.common.network.{ClientInformation, ListenerName, NetworkSend}
+import org.junit.Test
+import org.apache.kafka.common.protocol.{ApiKeys, Errors}
+import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
+import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest.PartitionData
+import org.apache.kafka.common.requests._
+import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
+import org.easymock.EasyMock.createNiceMock
+import org.junit.Assert.assertEquals
+
+import scala.collection.mutable.ArrayBuffer
+
+class RequestConvertToJsonTest {
+
+  @Test
+  def testAllRequestTypesHandled(): Unit = {
+    val unhandledKeys = ArrayBuffer[String]()
+    ApiKeys.values().foreach(key => {

Review comment:
       nit: We tend to write this as follows (there is another similar case in `testAllResponseTypesHandled`):
   ```
   ApiKeys.values().foreach { key =>
     ...
   }
   ```
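   Applied to this test, that would be:
   ```
   ApiKeys.values().foreach { key =>
     val version: Short = key.latestVersion()
     // ... rest of the loop body unchanged ...
   }
   ```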

##########
File path: generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java
##########
@@ -375,13 +385,36 @@ private void generateVariableLengthTargetToJson(Target target, Versions versions
                         target.sourceVariable())));
             } else {
                 headerGenerator.addImport(MessageGenerator.ARRAYS_CLASS);
+                headerGenerator.addImport(MessageGenerator.INT_NODE_CLASS);
+                buffer.printf("if (_serializeRecords) {%n");

Review comment:
       I am not sure I understand why we are making this change now; it was not there before. Could you elaborate?
   
   Is it because the produce request uses `bytes` as the field type? It seems `bytes` was used there by mistake. FYI, the PR which will migrate the produce request to the automated protocol will change it to `records`: https://github.com/apache/kafka/pull/9401/files#diff-e6dde0832c4873b546db2ad0d37be899855a436972b0a2c5008b100a0ffff64dR50.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

