adixitconfluent commented on code in PR #18459:
URL: https://github.com/apache/kafka/pull/18459#discussion_r1910309874


##########
core/src/test/java/kafka/server/share/SharePartitionManagerTest.java:
##########
@@ -146,6 +146,7 @@ public class SharePartitionManagerTest {
     private static final short MAX_FETCH_RECORDS = 500;
     private static final int DELAYED_SHARE_FETCH_MAX_WAIT_MS = 2000;
     private static final int DELAYED_SHARE_FETCH_TIMEOUT_MS = 3000;
+    private static final int BATCH_SIZE = 500;

Review Comment:
   We have defined the same BATCH_SIZE = 500 in SharePartitionTest, DelayedShareFetchTest, ShareFetchUtilsTest and ShareFetchTest. Can we reuse a single definition instead of duplicating it?
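   A minimal sketch of one way to do that, assuming a hypothetical shared constants class (the name ShareTestConstants and its package are illustrative only, not something that exists in this PR; an existing shared test utility already on the test classpath would work just as well):

   ```java
   // Hypothetical shared home for the constant; name and package are illustrative only.
   package kafka.server.share;

   public final class ShareTestConstants {
       // Default batch size used across the share-fetch related tests.
       public static final int BATCH_SIZE = 500;

       private ShareTestConstants() {
           // Utility class; no instances.
       }
   }
   ```

   Each test class could then drop its private BATCH_SIZE field and reference (or statically import) ShareTestConstants.BATCH_SIZE instead.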



##########
core/src/test/java/kafka/server/share/SharePartitionTest.java:
##########
@@ -1104,6 +1116,109 @@ public void testAcquireWithEmptyFetchRecords() {
         assertEquals(0, sharePartition.nextFetchOffset());
     }
 
+    @Test
+    public void testAcquireWithBatchSizeAndSingleBatch() {
+        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
+        // Single batch has more records than batch size. Hence, only a single batch exceeding the batch size
+        // should be acquired.
+        MemoryRecords records = memoryRecords(5);
+        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+            MEMBER_ID,
+            2 /* Batch size */,
+            10,
+            new FetchPartitionData(Errors.NONE, 20, 0, records,
+                Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)),
+            5);
+
+        assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray());
+        assertEquals(5, sharePartition.nextFetchOffset());
+        assertEquals(1, sharePartition.cachedState().size());
+        assertEquals(0, sharePartition.cachedState().get(0L).firstOffset());
+        assertEquals(4, sharePartition.cachedState().get(0L).lastOffset());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState());
+        assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId());
+        assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount());
+        assertNull(sharePartition.cachedState().get(0L).offsetState());
+    }
+
+    @Test
+    public void testAcquireWithBatchSizeAndMultipleBatches() {
+        SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
+        // Create 3 batches of records.
+        ByteBuffer buffer = ByteBuffer.allocate(4096);
+        memoryRecordsBuilder(buffer, 5, 2).close();
+        memoryRecordsBuilder(buffer, 5, 10).close();
+        memoryRecordsBuilder(buffer, 7, 15).close();
+        memoryRecordsBuilder(buffer, 5, 22).close();
+        buffer.flip();
+        MemoryRecords records = MemoryRecords.readableRecords(buffer);
+
+        List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+            MEMBER_ID,
+            5 /* Batch size */,
+            100,
+            new FetchPartitionData(Errors.NONE, 20, 0, records,
+                Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)),
+            25 /* Gap of 3 records will also be added to first batch */);
+
+        // Fetch expected records from 3 batches, but change the first expected record to include gap offsets.
+        List<AcquiredRecords> expectedAcquiredRecords = expectedAcquiredRecords(records, 1);
+        expectedAcquiredRecords.remove(0);
+        expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(2, 9, 1));
+
+        assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+        assertEquals(27, sharePartition.nextFetchOffset());
+        assertEquals(4, sharePartition.cachedState().size());
+        assertTrue(sharePartition.cachedState().containsKey(2L));
+        assertTrue(sharePartition.cachedState().containsKey(10L));
+        assertTrue(sharePartition.cachedState().containsKey(15L));
+        assertTrue(sharePartition.cachedState().containsKey(22L));
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+        assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(22L).batchState());
+        assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId());
+        assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId());
+        assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId());
+        assertEquals(MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId());
+        assertEquals(1, sharePartition.cachedState().get(2L).batchDeliveryCount());
+        assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
+        assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount());
+        assertEquals(1, sharePartition.cachedState().get(22L).batchDeliveryCount());
+        assertNull(sharePartition.cachedState().get(2L).offsetState());
+        assertNull(sharePartition.cachedState().get(10L).offsetState());
+        assertNull(sharePartition.cachedState().get(15L).offsetState());
+        assertNull(sharePartition.cachedState().get(22L).offsetState());
+    }
+
+    @Test
+    public void testAcquireWithBatchSizeWithBatchSize() {

Review Comment:
   The test name seems incorrect; "WithBatchSize" appears twice.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
