asdaraujo commented on a change in pull request #8730:
URL: https://github.com/apache/kafka/pull/8730#discussion_r432749206



##########
File path: connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorConnectorsIntegrationTest.java
##########
@@ -244,26 +251,50 @@ public void testReplication() throws InterruptedException {
 
         assertTrue("Offsets not translated downstream to backup cluster. Found: " + backupOffsets, backupOffsets.containsKey(
             new TopicPartition("primary.test-topic-1", 0)));
+        assertTrue("Offset of empty partition not translated downstream to backup cluster. Found: " + backupOffsets, backupOffsets.containsKey(
+            new TopicPartition("primary.test-topic-1", NUM_PARTITIONS - 1)));
+
+        // Produce additional messages.
+        for (int i = 0; i < NUM_RECORDS_PRODUCED; i++) {
+            // produce to all partitions this time
+            primary.kafka().produce("test-topic-1", i % NUM_PARTITIONS, "key", "message-2-" + i);
+            backup.kafka().produce("test-topic-1", i % NUM_PARTITIONS, "key", "message-2-" + i);
+        }
 
         // Failover consumer group to backup cluster.
-        Consumer<byte[], byte[]> consumer1 = backup.kafka().createConsumer(Collections.singletonMap("group.id", "consumer-group-1"));
-        consumer1.assign(backupOffsets.keySet());
+        Map<String, Object> consumerProps = new HashMap<String, Object>() {{
+                put("group.id", "consumer-group-1");
+                put("auto.offset.reset", "latest");
+            }};
+        Consumer<byte[], byte[]> consumer1 = backup.kafka().createConsumer(consumerProps);
+        List<TopicPartition> backupPartitions = IntStream.range(0, NUM_PARTITIONS)
+                .boxed()
+                .flatMap(p -> Stream.of(new TopicPartition("test-topic-1", p), new TopicPartition("primary.test-topic-1", p)))
+                .collect(Collectors.toList());
+        consumer1.assign(backupPartitions);
         backupOffsets.forEach(consumer1::seek);
-        consumer1.poll(Duration.ofMillis(500));
-        consumer1.commitSync();
 
         assertTrue("Consumer failedover to zero offset.", consumer1.position(new TopicPartition("primary.test-topic-1", 0)) > 0);
         assertTrue("Consumer failedover beyond expected offset.", consumer1.position(
-            new TopicPartition("primary.test-topic-1", 0)) <= NUM_RECORDS_PRODUCED);
+            new TopicPartition("primary.test-topic-1", 0)) <= Math.ceil((float) NUM_RECORDS_PRODUCED / (NUM_PARTITIONS - 1)));
+        assertEquals("Consumer failedover to non-zero offset on last partition.", 0,
+            consumer1.position(new TopicPartition("primary.test-topic-1", NUM_PARTITIONS - 1)));
         assertTrue("Checkpoints were not emitted upstream to primary cluster.", primary.kafka().consume(1,
             CHECKPOINT_DURATION_MS, "backup.checkpoints.internal").count() > 0);
 
+        Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> messages1 = consumeAllMessages(consumer1);
+        System.out.println(messages1);
+        for (TopicPartition tp : backupPartitions) {
+            assertNotNull("No data consumed from partition " + tp + ".", messages1.get(tp));
+            int expectedMessageCount = tp.toString().equals("test-topic-1-0") ? 22 : 10;

Review comment:
You're right, this does look like magic indeed! I had to think about it again to understand my own reasoning :)

I added comments to explain what's going on here and why we expect 22 messages for one partition and 10 for the others. I left the tests as they were; I think the comments make the test logic clear.
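
For readers trying to reconstruct the arithmetic, here is a minimal sketch of where numbers like 22 and 10 can come from, assuming the first produce round wrote NUM_RECORDS_PRODUCED records round-robin over NUM_PARTITIONS - 1 partitions (leaving the last partition empty, as the empty-partition assertion suggests) and the second round, shown in the diff above, wrote over all NUM_PARTITIONS partitions. The concrete values (100 records, 10 partitions) and the class name are illustrative assumptions, not taken from the test:

    // Hypothetical sketch; values are assumed, not read from the test source.
    public class ExpectedCountSketch {
        static final int NUM_RECORDS_PRODUCED = 100; // assumption
        static final int NUM_PARTITIONS = 10;        // assumption

        public static void main(String[] args) {
            // Round one: i % (NUM_PARTITIONS - 1) leaves the last partition empty
            // and puts at most ceil(100 / 9) = 12 records on a partition, which
            // matches the Math.ceil upper bound in the position assertion above.
            int maxRoundOne = (int) Math.ceil((float) NUM_RECORDS_PRODUCED / (NUM_PARTITIONS - 1));

            // Round two: i % NUM_PARTITIONS puts exactly 10 records on every partition.
            int roundTwo = NUM_RECORDS_PRODUCED / NUM_PARTITIONS;

            System.out.println(maxRoundOne);            // 12
            System.out.println(roundTwo);               // 10
            System.out.println(maxRoundOne + roundTwo); // 22: a partition read from
                                                        // offset 0 sees both rounds;
                                                        // one resumed past round one
                                                        // sees only the 10 new records.
        }
    }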




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

