FrankYang0529 commented on code in PR #18012: URL: https://github.com/apache/kafka/pull/18012#discussion_r1914011112
########## storage/src/test/java/org/apache/kafka/storage/internals/log/LogSegmentTest.java: ########## @@ -779,6 +784,75 @@ public void testDeleteIfExistsWithGetParentIsNull() throws IOException { } } + @Test + public void testIndexForMultipleBatchesInMemoryRecords() throws IOException { + LogSegment segment = createSegment(0, 1, Time.SYSTEM); + + ByteBuffer buffer1 = ByteBuffer.allocate(1024); + // append first batch to buffer1 + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 0); + builder.append(0L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append second batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 1); + builder.append(1L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer1.flip(); + MemoryRecords record = MemoryRecords.readableRecords(buffer1); + segment.append(1L, record); + + ByteBuffer buffer2 = ByteBuffer.allocate(1024); + // append first batch to buffer2 + builder = MemoryRecords.builder(buffer2, Compression.NONE, TimestampType.CREATE_TIME, 2); + builder.append(2L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer2.flip(); + record = MemoryRecords.readableRecords(buffer2); + segment.append(2L, record); + + assertEquals(2, segment.offsetIndex().entries()); + assertTrue(segment.offsetIndex().lookup(2L).position > segment.offsetIndex().lookup(1L).position); + + assertEquals(2, segment.timeIndex().entries()); + assertTrue(segment.timeIndex().lookup(2L).offset > segment.timeIndex().lookup(1L).offset); + } + + @Test + public void testNonMonotonicTimestampForMultipleBatchesInMemoryRecords() throws IOException { + LogSegment segment = createSegment(0, 1, Time.SYSTEM); + + ByteBuffer buffer1 = ByteBuffer.allocate(1024); + // append first batch to buffer1 + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 0); + 
builder.append(1L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append second batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 1); + builder.append(0L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + // append third batch to buffer1 + builder = MemoryRecords.builder(buffer1, Compression.NONE, TimestampType.CREATE_TIME, 2); + builder.append(2L, "key1".getBytes(), "value1".getBytes()); + builder.close(); + + buffer1.flip(); + MemoryRecords record = MemoryRecords.readableRecords(buffer1); + segment.append(2L, record); + + assertEquals(2, segment.offsetIndex().entries()); + assertEquals(1, segment.offsetIndex().lookup(1L).offset); + assertEquals(2, segment.offsetIndex().lookup(2L).offset); + + assertEquals(2, segment.timeIndex().entries()); + assertEquals(new TimestampOffset(1, 0), segment.timeIndex().entry(0)); Review Comment: Thanks for the suggestion. I only check the `offset` field for the offset index, because the `position` field may look like some kind of magic value. WDYT? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org