adixitconfluent commented on code in PR #19261:
URL: https://github.com/apache/kafka/pull/19261#discussion_r2026273027


##########
core/src/main/java/kafka/server/share/SharePartition.java:
##########
@@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro
         }
     }
 
+    private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords(
+        FetchPartitionData fetchPartitionData,
+        FetchIsolation isolationLevel,
+        ShareAcquiredRecords shareAcquiredRecords
+    ) {
+        if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty())
+            return shareAcquiredRecords;
+        // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any
+        // transactions that were aborted/did not commit due to timeout.
+        List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(),
+            shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get());
+        int acquiredCount = 0;
+        for (AcquiredRecords records : result) {
+            acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1);
+        }
+        return new ShareAcquiredRecords(result, acquiredCount);
+    }
+
+    private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords(
+        Iterable<? extends RecordBatch> batches,
+        List<AcquiredRecords> acquiredRecords,
+        List<FetchResponseData.AbortedTransaction> abortedTransactions
+    ) {
+        lock.writeLock().lock();
+        try {
+            // The record batches that need to be archived in cachedState because they were a part of aborted transactions.
+            List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions);
+            for (RecordBatch recordBatch : recordsToArchive) {
+                // Archive the offsets/batches in the cached state.
+                NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch);
+                archiveAcquiredBatchRecords(subMap, recordBatch);
+            }
+            return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive);
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+    // Visible for testing.
+    List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords(
+        List<AcquiredRecords> acquiredRecords,
+        List<RecordBatch> recordsToArchive
+    ) {
+        lock.writeLock().lock();
+        try {
+            List<AcquiredRecords> result = new ArrayList<>();
+
+            for (AcquiredRecords acquiredRecord : acquiredRecords) {
+                List<AcquiredRecords> tempAcquiredRecords = new ArrayList<>();
+                tempAcquiredRecords.add(acquiredRecord);
+                for (RecordBatch recordBatch : recordsToArchive) {
+                    List<AcquiredRecords> newAcquiredRecords = new ArrayList<>();
+                    for (AcquiredRecords temp : tempAcquiredRecords) {
+                        // Check if record batch overlaps with the acquired records.
+                        if (temp.firstOffset() <= recordBatch.lastOffset() && temp.lastOffset() >= recordBatch.baseOffset()) {
+                            // Split the acquired record into parts before, inside, and after the overlapping record batch.
+                            if (temp.firstOffset() < recordBatch.baseOffset()) {
+                                newAcquiredRecords.add(new AcquiredRecords()
+                                    .setFirstOffset(temp.firstOffset())
+                                    .setLastOffset(recordBatch.baseOffset() - 1)
+                                    .setDeliveryCount((short) 1));
+                            }
+                            if (temp.lastOffset() > recordBatch.lastOffset()) {
+                                newAcquiredRecords.add(new AcquiredRecords()
+                                    .setFirstOffset(recordBatch.lastOffset() + 1)
+                                    .setLastOffset(temp.lastOffset())
+                                    .setDeliveryCount((short) 1));
+                            }
+                        } else {
+                            newAcquiredRecords.add(temp);
+                        }
+                    }
+                    tempAcquiredRecords = newAcquiredRecords;
+                }
+                result.addAll(tempAcquiredRecords);
+            }
+            return result;
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+    private void archiveAcquiredBatchRecords(NavigableMap<Long, InFlightBatch> subMap, RecordBatch recordBatch) {
+        lock.writeLock().lock();
+        try {
+            archiveRecords(recordBatch.baseOffset(), recordBatch.lastOffset() + 1, subMap, RecordState.ACQUIRED);
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * This function fetches the sub map from cachedState where all the offset details present in the recordBatch can be referred to
+     * OR it gives an exception if those offsets are not present in cachedState.
+     * @param recordBatch The record batch for which we want to find the sub map.
+     * @return the sub map containing all the offset details.
+     */
+    private NavigableMap<Long, InFlightBatch> fetchSubMapOrException(RecordBatch recordBatch) {

Review Comment:
   I agree with your point. Earlier I had accepted the suggestion from the review comment https://github.com/apache/kafka/pull/19261#discussion_r2021438959, but I'll change it back to `fetchSubMap`.
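   For reference, a minimal sketch of what the renamed helper could look like, assuming `cachedState` is the `NavigableMap<Long, InFlightBatch>` keyed by batch first offset that `SharePartition` maintains; the body and the exception type below are illustrative, not the PR's actual implementation:
   ```java
   // Illustrative sketch only: resolve the cachedState sub map that covers the
   // record batch's offsets, failing if those offsets are not tracked.
   private NavigableMap<Long, InFlightBatch> fetchSubMap(RecordBatch recordBatch) {
       Long floorOffset = cachedState.floorKey(recordBatch.baseOffset());
       if (floorOffset == null) {
           // Assumed error handling; the real method may throw a different exception type.
           throw new IllegalStateException("No in-flight batch tracked for offset " + recordBatch.baseOffset());
       }
       return cachedState.subMap(floorOffset, true, recordBatch.lastOffset(), true);
   }
   ```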
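   Separately, the splitting that `filterRecordBatchesFromAcquiredRecords` in the hunk above performs can be illustrated standalone; this sketch uses plain offset ranges instead of Kafka's `AcquiredRecords`/`RecordBatch` types, so all names and types here are hypothetical:
   ```java
   import java.util.ArrayList;
   import java.util.List;

   // Standalone illustration of the overlap-splitting logic, not the PR's code.
   public class SplitExample {
       record Range(long first, long last) { }

       // Remove every offset covered by an archived range from the acquired range,
       // keeping the pieces before and after each overlap.
       static List<Range> subtract(Range acquired, List<Range> archived) {
           List<Range> current = new ArrayList<>(List.of(acquired));
           for (Range batch : archived) {
               List<Range> next = new ArrayList<>();
               for (Range r : current) {
                   if (r.first() <= batch.last() && r.last() >= batch.first()) {
                       if (r.first() < batch.first()) next.add(new Range(r.first(), batch.first() - 1));
                       if (r.last() > batch.last()) next.add(new Range(batch.last() + 1, r.last()));
                   } else {
                       next.add(r);
                   }
               }
               current = next;
           }
           return current;
       }

       public static void main(String[] args) {
           // Acquired offsets 0-9 with an aborted batch covering 3-5 -> [0-2] and [6-9].
           System.out.println(subtract(new Range(0, 9), List.of(new Range(3, 5))));
       }
   }
   ```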


