lucasbru commented on code in PR #20244:
URL: https://github.com/apache/kafka/pull/20244#discussion_r2316966518


##########
core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala:
##########
@@ -2589,13 +2592,25 @@ class PlaintextAdminIntegrationTest extends 
BaseAdminIntegrationTest {
     shareGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroupId)
     val shareGroup = createShareConsumer(configOverrides = shareGroupConfig)
 
+    val streamsConfig = new Properties()

Review Comment:
   Same as below — why do we need to set `AUTO_OFFSET_RESET_CONFIG` here?



##########
core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala:
##########
@@ -2589,13 +2592,25 @@ class PlaintextAdminIntegrationTest extends 
BaseAdminIntegrationTest {
     shareGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroupId)
     val shareGroup = createShareConsumer(configOverrides = shareGroupConfig)
 
+    val streamsConfig = new Properties()
+    streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+
+    val streamsGroup = createStreamsGroup(
+      configOverrides = streamsConfig ,
+      inputTopic = testTopicName ,
+      outputTopic = testStreamsOutputTopicName ,
+      streamsGroupId = streamsGroupId
+    )
+
     val config = createConfig
     client = Admin.create(config)
     try {
-      client.createTopics(util.Set.of(
-        new NewTopic(testTopicName, 1, 1.toShort)
-      )).all().get()
-      waitForTopics(client, List(testTopicName), List())
+      //      client.createTopics(util.Set.of(

Review Comment:
   Why are these lines commented out?



##########
core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala:
##########
@@ -4363,17 +4396,157 @@ class PlaintextAdminIntegrationTest extends 
BaseAdminIntegrationTest {
       }
     }
   }
+
+  @Test
+  def testDescribeStreamsGroups(): Unit = {
+    val streamsGroupId = "stream_group_id"
+    val testTopicName = "test_topic"
+    val testOutputTopicName = "test_output_topic"
+    val testNumPartitions = 1
+
+    val config = createConfig
+    client = Admin.create(config)
+
+    prepareTopics(List(testTopicName, testOutputTopicName), testNumPartitions)
+    prepareRecords(testTopicName)
+
+    val streamsConfig = new Properties()
+    streamsConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+    val streams = createStreamsGroup(
+      configOverrides = streamsConfig,
+      inputTopic = testTopicName,
+      outputTopic = testOutputTopicName,
+      streamsGroupId = streamsGroupId
+    )
+    streams.poll(JDuration.ofMillis(500L))
+
+    try {
+      TestUtils.waitUntilTrue(() => {
+        val firstGroup = 
client.listGroups().all().get().stream().findFirst().orElse(null)
+        firstGroup.groupState().orElse(null) == GroupState.STABLE && 
firstGroup.groupId() == streamsGroupId
+      }, "Stream group not stable yet")
+
+      // Verify the describe call works correctly
+      val describedGroups = 
client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get()
+      val group = describedGroups.get(streamsGroupId)
+      assertNotNull(group)
+      assertEquals(streamsGroupId, group.groupId())
+      assertFalse(group.members().isEmpty)
+      assertNotNull(group.subtopologies())
+      assertFalse(group.subtopologies().isEmpty)
+
+      // Verify the topology contains the expected source and sink topics
+      val subtopologies = group.subtopologies().asScala
+      assertTrue(subtopologies.exists(subtopology =>
+        subtopology.sourceTopics().contains(testTopicName)))
+
+      // Test describing a non-existing group
+      val nonExistingGroup = "non_existing_stream_group"
+      val describedNonExistingGroupResponse = 
client.describeStreamsGroups(util.List.of(nonExistingGroup))
+      assertFutureThrows(classOf[GroupIdNotFoundException], 
describedNonExistingGroupResponse.all())
+
+    } finally {
+      Utils.closeQuietly(streams, "streams")
+      Utils.closeQuietly(client, "adminClient")
+    }
+  }
+
+  @Test
+  def testDeleteStreamsGroups(): Unit = {
+    val testTopicName = "test_topic"
+    val testOutputTopicName = "test_output_topic"
+    val testNumPartitions = 3
+    val testNumStreamsGroup = 3
+
+    val targetDeletedGroups = util.List.of("stream_group_id_2", 
"stream_group_id_3")
+    val targetRemainingGroups = util.List.of("stream_group_id_1")
+
+    val config = createConfig
+    client = Admin.create(config)
+
+    prepareTopics(List(testTopicName, testOutputTopicName), testNumPartitions)
+    prepareRecords(testTopicName)
+
+    val streamsList = scala.collection.mutable.ListBuffer[(String, 
AsyncKafkaConsumer[_,_])]()
+
+    try {
+      for (i <- 1 to testNumStreamsGroup) {
+        val streamsGroupId = s"stream_group_id_$i"
+        val streamsConfig = new Properties()

Review Comment:
   I would rename this to `consumerConfig`, since this is, at this point, just 
a consumerConfig. But I wonder - do we need it at all?



##########
core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala:
##########
@@ -235,10 +239,57 @@ abstract class IntegrationTestHarness extends 
KafkaServerTestHarness {
     streamsConsumer
   }
 
+  def createStreamsGroup[K, V](configOverrides: Properties = new Properties,
+                               configsToRemove: List[String] = List(),
+                               inputTopic: String,
+                               outputTopic: String,
+                               streamsGroupId: String): AsyncKafkaConsumer[K, 
V] = {
+    val props = new Properties()
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers())
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, streamsGroupId)
+    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
classOf[ByteArrayDeserializer].getName)
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
classOf[ByteArrayDeserializer].getName)
+    props ++= configOverrides
+    configsToRemove.foreach(props.remove(_))
+
+    val streamsRebalanceData = new StreamsRebalanceData(
+      UUID.randomUUID(),
+      Optional.empty(),
+      util.Map.of(
+        "subtopology-0", new StreamsRebalanceData.Subtopology(
+          util.Set.of(inputTopic),
+          util.Set.of(),
+          util.Map.of(),
+          util.Map.of(outputTopic + "-store-changelog", new 
StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), 
util.Map.of())),
+          util.Set.of()
+        )),
+      Map.empty[String, String].asJava
+    )
+
+    val consumer = createStreamsConsumer(
+      keyDeserializer = new 
ByteArrayDeserializer().asInstanceOf[Deserializer[K]],
+      valueDeserializer = new 
ByteArrayDeserializer().asInstanceOf[Deserializer[V]],
+      configOverrides = props,
+      streamsRebalanceData = streamsRebalanceData
+    )
+    consumer.subscribe(util.Set.of(inputTopic, outputTopic),

Review Comment:
   Kafka Streams only subscribes to the input topic, not the output topics. You 
can remove the output topic from this function altogether.



##########
core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala:
##########
@@ -4421,9 +4594,9 @@ object PlaintextAdminIntegrationTest {
   }
 
   def checkInvalidAlterConfigs(
-    test: KafkaServerTestHarness,
-    admin: Admin
-  ): Unit = {
+                                test: KafkaServerTestHarness,

Review Comment:
   Can you please revert these kinds of pure white-space changes from the PR?
   
   In the Apache Kafka project, we typically try to avoid changing lines that 
we don't have to, because the change history (git blame) is often quite 
important to understand things.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to