AndrewJSchofield commented on code in PR #19261: URL: https://github.com/apache/kafka/pull/19261#discussion_r2025141887
########## core/src/main/scala/kafka/server/KafkaApis.scala: ########## @@ -3199,13 +3199,21 @@ class KafkaApis(val requestChannel: RequestChannel, request.context.principal, request.context.listenerName.value)) + def fetchIsolation(): FetchIsolation = { + if (groupConfigManager.groupConfig(groupId).isPresent) { + FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, groupConfigManager.groupConfig(groupId).get().shareIsolationLevel()) + } else { + FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, GroupConfig.defaultShareIsolationLevel) + } + } + val params = new FetchParams( FetchRequest.CONSUMER_REPLICA_ID, -1, shareFetchRequest.maxWait, fetchMinBytes, fetchMaxBytes, - FetchIsolation.HIGH_WATERMARK, + fetchIsolation(), Review Comment: Maybe this could be simplified to something like ```FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, groupConfigManager.groupConfig(groupId).map(config => config.shareIsolationLevel()).orElse(GroupConfig.defaultShareIsolationLevel))``` ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -1246,7 +1259,9 @@ private boolean archiveAvailableRecords(long startOffset, long endOffset, Naviga private boolean archivePerOffsetBatchRecords(InFlightBatch inFlightBatch, long startOffsetToArchive, - long endOffsetToArchive) { + long endOffsetToArchive, + RecordState initialState + ) { Review Comment: nit: This source file tends to have the `) {` on the end of the parameter line. ########## core/src/main/scala/kafka/server/KafkaApis.scala: ########## @@ -3199,13 +3199,21 @@ class KafkaApis(val requestChannel: RequestChannel, request.context.principal, request.context.listenerName.value)) + def fetchIsolation(): FetchIsolation = { + if (groupConfigManager.groupConfig(groupId).isPresent) { + FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, groupConfigManager.groupConfig(groupId).get().shareIsolationLevel()) + } else { + FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, GroupConfig.defaultShareIsolationLevel) + } + } + val params = new FetchParams( FetchRequest.CONSUMER_REPLICA_ID, -1, shareFetchRequest.maxWait, fetchMinBytes, fetchMaxBytes, - FetchIsolation.HIGH_WATERMARK, + fetchIsolation(), Review Comment: Although "simplified" might be stretching the point. ########## group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupConfig.java: ########## @@ -137,7 +149,13 @@ public final class GroupConfig extends AbstractConfig { GroupCoordinatorConfig.STREAMS_GROUP_NUM_STANDBY_REPLICAS_DEFAULT, atLeast(0), MEDIUM, - GroupCoordinatorConfig.STREAMS_GROUP_NUM_STANDBY_REPLICAS_DOC); + GroupCoordinatorConfig.STREAMS_GROUP_NUM_STANDBY_REPLICAS_DOC) + .define(SHARE_ISOLATION_LEVEL_CONFIG, Review Comment: Please put this config adjacent to the other share group configs above. ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -1206,7 +1219,7 @@ private boolean archiveAvailableRecordsOnLsoMovement(long logStartOffset) { * @param map The map containing the in-flight records. * @return A boolean which indicates whether any record is archived or not. Review Comment: nit: Add `initialState` to the javadoc comment. 
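Going back to the `fetchIsolation` suggestion earlier in this hunk: the real call site is Scala (`KafkaApis.scala`) and goes through `GroupConfigManager`/`GroupConfig`, but the Optional map/orElse shape the reviewer proposes can be sketched in plain Java. `ShareGroupSettings` and `IsolationLevel` below are hypothetical stand-ins, not the PR's actual types.

```java
import java.util.Optional;

// Hypothetical stand-ins for GroupConfig and the share isolation level.
enum IsolationLevel { READ_UNCOMMITTED, READ_COMMITTED }

record ShareGroupSettings(IsolationLevel shareIsolationLevel) {
    static final IsolationLevel DEFAULT_SHARE_ISOLATION_LEVEL = IsolationLevel.READ_UNCOMMITTED;
}

class IsolationResolver {
    // Collapses the if (isPresent) / else branches into a single map/orElse chain,
    // as the review comment suggests.
    static IsolationLevel resolve(Optional<ShareGroupSettings> groupConfig) {
        return groupConfig
            .map(ShareGroupSettings::shareIsolationLevel)
            .orElse(ShareGroupSettings.DEFAULT_SHARE_ISOLATION_LEVEL);
    }
}
```

Whether the one-liner is really clearer than the explicit if/else is exactly the tradeoff the follow-up comment ("Although "simplified" might be stretching the point.") acknowledges.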
########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. + NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); Review Comment: Given that this method is only really a single line of code, I would remove the method and just have the line of code here. ########## core/src/test/java/kafka/server/share/SharePartitionTest.java: ########## @@ -116,6 +124,7 @@ public class SharePartitionTest { private static final int DEFAULT_FETCH_OFFSET = 0; private static final int MAX_FETCH_RECORDS = Integer.MAX_VALUE; private static final byte ACKNOWLEDGE_TYPE_GAP_ID = 0; + private static final FetchIsolation FETCH_ISOLATION = FetchIsolation.HIGH_WATERMARK; Review Comment: This constant name really tells us the type, not the value. I wonder if `FETCH_ISOLATION_HWM` might be more instructive. ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any Review Comment: nit: I'd add a blank line to make it a bit more readable at a glance. The general case is to return on line 2514. 
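For reference, the count that is recomputed after filtering in `maybeFilterAbortedTransactionalAcquiredRecords` can be sketched on its own; `OffsetRange` is a hypothetical stand-in for `AcquiredRecords`, with both ends inclusive.

```java
import java.util.List;

// Hypothetical stand-in for AcquiredRecords: an inclusive offset range.
record OffsetRange(long firstOffset, long lastOffset) { }

class AcquiredCounter {
    // Recomputes the acquired-record count over the filtered ranges, mirroring the loop above.
    static int acquiredCount(List<OffsetRange> filtered) {
        int count = 0;
        for (OffsetRange range : filtered) {
            // firstOffset and lastOffset are both inclusive, hence the +1.
            count += (int) (range.lastOffset() - range.firstOffset() + 1);
        }
        return count;
    }
}
```

For example, ranges [0, 4] and [7, 9] give 5 + 3 = 8 acquired records.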
########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. + NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); + } + return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); + } finally { + lock.writeLock().unlock(); + } + } + + // Visible for testing. + List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords( + List<AcquiredRecords> acquiredRecords, + List<RecordBatch> recordsToArchive + ) { + lock.writeLock().lock(); + try { + List<AcquiredRecords> result = new ArrayList<>(); + + for (AcquiredRecords acquiredRecord : acquiredRecords) { + List<AcquiredRecords> tempAcquiredRecords = new ArrayList<>(); + tempAcquiredRecords.add(acquiredRecord); + for (RecordBatch recordBatch : recordsToArchive) { + List<AcquiredRecords> newAcquiredRecords = new ArrayList<>(); + for (AcquiredRecords temp : tempAcquiredRecords) { + // Check if record batch overlaps with the acquired records. + if (temp.firstOffset() <= recordBatch.lastOffset() && temp.lastOffset() >= recordBatch.baseOffset()) { + // Split the acquired record into parts before, inside, and after the overlapping record batch. 
+ if (temp.firstOffset() < recordBatch.baseOffset()) { + newAcquiredRecords.add(new AcquiredRecords() + .setFirstOffset(temp.firstOffset()) + .setLastOffset(recordBatch.baseOffset() - 1) + .setDeliveryCount((short) 1)); + } + if (temp.lastOffset() > recordBatch.lastOffset()) { + newAcquiredRecords.add(new AcquiredRecords() + .setFirstOffset(recordBatch.lastOffset() + 1) + .setLastOffset(temp.lastOffset()) + .setDeliveryCount((short) 1)); + } + } else { + newAcquiredRecords.add(temp); + } + } + tempAcquiredRecords = newAcquiredRecords; + } + result.addAll(tempAcquiredRecords); + } + return result; + } finally { + lock.writeLock().unlock(); + } + } + + private void archiveAcquiredBatchRecords(NavigableMap<Long, InFlightBatch> subMap, RecordBatch recordBatch) { + lock.writeLock().lock(); + try { + archiveRecords(recordBatch.baseOffset(), recordBatch.lastOffset() + 1, subMap, RecordState.ACQUIRED); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * This function fetches the sub map from cachedState where all the offset details present in the recordBatch can be referred to + * OR it gives an exception if those offsets are not present in cachedState. + * @param recordBatch The record batch for which we want to find the sub map. + * @return the sub map containing all the offset details. + */ + private NavigableMap<Long, InFlightBatch> fetchSubMapOrException(RecordBatch recordBatch) { Review Comment: I think this style is a bit strange. If I understand correctly, we never expect to see the exception. That would be a bug. So, I would just call it `getSubMap` or `getCachedSubMap` or similar. Yes, it throws an exception if it's called in an invalid state, but then the share-partition would be messed up already. ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. 
+ NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); + } + return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); + } finally { + lock.writeLock().unlock(); + } + } + + // Visible for testing. + List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords( + List<AcquiredRecords> acquiredRecords, + List<RecordBatch> recordsToArchive + ) { + lock.writeLock().lock(); + try { + List<AcquiredRecords> result = new ArrayList<>(); + + for (AcquiredRecords acquiredRecord : acquiredRecords) { + List<AcquiredRecords> tempAcquiredRecords = new ArrayList<>(); + tempAcquiredRecords.add(acquiredRecord); + for (RecordBatch recordBatch : recordsToArchive) { + List<AcquiredRecords> newAcquiredRecords = new ArrayList<>(); + for (AcquiredRecords temp : tempAcquiredRecords) { + // Check if record batch overlaps with the acquired records. + if (temp.firstOffset() <= recordBatch.lastOffset() && temp.lastOffset() >= recordBatch.baseOffset()) { + // Split the acquired record into parts before, inside, and after the overlapping record batch. + if (temp.firstOffset() < recordBatch.baseOffset()) { + newAcquiredRecords.add(new AcquiredRecords() + .setFirstOffset(temp.firstOffset()) + .setLastOffset(recordBatch.baseOffset() - 1) + .setDeliveryCount((short) 1)); + } + if (temp.lastOffset() > recordBatch.lastOffset()) { + newAcquiredRecords.add(new AcquiredRecords() + .setFirstOffset(recordBatch.lastOffset() + 1) + .setLastOffset(temp.lastOffset()) + .setDeliveryCount((short) 1)); Review Comment: Also here. ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. 
+ NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); + } + return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); + } finally { + lock.writeLock().unlock(); + } + } + + // Visible for testing. + List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords( + List<AcquiredRecords> acquiredRecords, + List<RecordBatch> recordsToArchive + ) { + lock.writeLock().lock(); + try { + List<AcquiredRecords> result = new ArrayList<>(); + + for (AcquiredRecords acquiredRecord : acquiredRecords) { Review Comment: It is a bit odd having the loop variable of type `AcquiredRecords` called `acquiredRecord`, and the list of `AcquiredRecords` called `acquiredRecords`. ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. + NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); + } + return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); + } finally { + lock.writeLock().unlock(); + } + } + + // Visible for testing. + List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords( + List<AcquiredRecords> acquiredRecords, + List<RecordBatch> recordsToArchive + ) { + lock.writeLock().lock(); + try { + List<AcquiredRecords> result = new ArrayList<>(); + + for (AcquiredRecords acquiredRecord : acquiredRecords) { + List<AcquiredRecords> tempAcquiredRecords = new ArrayList<>(); + tempAcquiredRecords.add(acquiredRecord); + for (RecordBatch recordBatch : recordsToArchive) { Review Comment: If you renamed `recordsToArchive` as `batchesToArchive`, and then renamed `recordBatch` to `batchToArchive`, I think that would help with the readability. 
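To make the splitting logic in `filterRecordBatchesFromAcquiredRecords` easier to follow, here is a standalone sketch of the same range arithmetic, adopting the renames suggested above (`batchesToArchive` / `batchToArchive`) and carrying the delivery count through the split as the next comment suggests. `Acquired` and `Batch` are hypothetical stand-ins for `AcquiredRecords` and `RecordBatch` offsets, not the PR's actual code.

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-ins: an acquired offset range with its delivery count,
// and an archived batch's offset range (both ends inclusive).
record Acquired(long first, long last, short deliveryCount) { }
record Batch(long first, long last) { }

class AcquiredRecordFilter {
    static List<Acquired> filter(List<Acquired> acquired, List<Batch> batchesToArchive) {
        List<Acquired> result = new ArrayList<>();
        for (Acquired acquiredRange : acquired) {
            List<Acquired> pieces = new ArrayList<>(List.of(acquiredRange));
            for (Batch batchToArchive : batchesToArchive) {
                List<Acquired> next = new ArrayList<>();
                for (Acquired piece : pieces) {
                    if (piece.first() <= batchToArchive.last() && piece.last() >= batchToArchive.first()) {
                        // Overlap: keep only the parts before and after the archived batch.
                        if (piece.first() < batchToArchive.first())
                            next.add(new Acquired(piece.first(), batchToArchive.first() - 1, piece.deliveryCount()));
                        if (piece.last() > batchToArchive.last())
                            next.add(new Acquired(batchToArchive.last() + 1, piece.last(), piece.deliveryCount()));
                    } else {
                        // No overlap: keep the piece unchanged.
                        next.add(piece);
                    }
                }
                pieces = next;
            }
            result.addAll(pieces);
        }
        return result;
    }
}
```

For example, filtering an acquired range [0, 9] against an archived batch [3, 5] yields [0, 2] and [6, 9], each retaining the original delivery count.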
########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -2484,6 +2505,174 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) thro } } + private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( + FetchPartitionData fetchPartitionData, + FetchIsolation isolationLevel, + ShareAcquiredRecords shareAcquiredRecords + ) { + if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) + return shareAcquiredRecords; + // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any + // transactions that were aborted/did not commit due to timeout. + List<AcquiredRecords> result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), + shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); + int acquiredCount = 0; + for (AcquiredRecords records : result) { + acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); + } + return new ShareAcquiredRecords(result, acquiredCount); + } + + private List<AcquiredRecords> filterAbortedTransactionalAcquiredRecords( + Iterable<? extends RecordBatch> batches, + List<AcquiredRecords> acquiredRecords, + List<FetchResponseData.AbortedTransaction> abortedTransactions + ) { + lock.writeLock().lock(); + try { + // The record batches that need to be archived in cachedState because they were a part of aborted transactions. + List<RecordBatch> recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); + for (RecordBatch recordBatch : recordsToArchive) { + // Archive the offsets/batches in the cached state. + NavigableMap<Long, InFlightBatch> subMap = fetchSubMapOrException(recordBatch); + archiveAcquiredBatchRecords(subMap, recordBatch); + } + return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); + } finally { + lock.writeLock().unlock(); + } + } + + // Visible for testing. + List<AcquiredRecords> filterRecordBatchesFromAcquiredRecords( + List<AcquiredRecords> acquiredRecords, + List<RecordBatch> recordsToArchive + ) { + lock.writeLock().lock(); + try { + List<AcquiredRecords> result = new ArrayList<>(); + + for (AcquiredRecords acquiredRecord : acquiredRecords) { + List<AcquiredRecords> tempAcquiredRecords = new ArrayList<>(); + tempAcquiredRecords.add(acquiredRecord); + for (RecordBatch recordBatch : recordsToArchive) { + List<AcquiredRecords> newAcquiredRecords = new ArrayList<>(); + for (AcquiredRecords temp : tempAcquiredRecords) { + // Check if record batch overlaps with the acquired records. + if (temp.firstOffset() <= recordBatch.lastOffset() && temp.lastOffset() >= recordBatch.baseOffset()) { + // Split the acquired record into parts before, inside, and after the overlapping record batch. + if (temp.firstOffset() < recordBatch.baseOffset()) { + newAcquiredRecords.add(new AcquiredRecords() + .setFirstOffset(temp.firstOffset()) + .setLastOffset(recordBatch.baseOffset() - 1) + .setDeliveryCount((short) 1)); Review Comment: Wouldn't this be `temp.deliveryCount()`? ########## core/src/main/java/kafka/server/share/SharePartition.java: ########## @@ -1206,7 +1219,7 @@ private boolean archiveAvailableRecordsOnLsoMovement(long logStartOffset) { * @param map The map containing the in-flight records. * @return A boolean which indicates whether any record is archived or not. 
Review Comment: And correct the description so it doesn't just return to the available records.
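On those last two javadoc nits, a hypothetical sketch of how the generalized archive step and its comment might read once `initialState` is documented. The state names, map shape, and boundary conventions here are assumptions for illustration, not the PR's actual code.

```java
import java.util.NavigableMap;
import java.util.TreeMap;

// Hypothetical stand-in for the in-flight record states.
enum RecordState { AVAILABLE, ACQUIRED, ARCHIVED }

class Archiver {
    /**
     * Archives in-flight records that are currently in {@code initialState}.
     *
     * @param startOffset  Inclusive start of the range to archive.
     * @param endOffset    Exclusive end of the range to archive.
     * @param map          The map containing the in-flight records, keyed by offset.
     * @param initialState Only records currently in this state are archived.
     * @return Whether any record was archived.
     */
    static boolean archiveRecords(long startOffset, long endOffset,
                                  NavigableMap<Long, RecordState> map, RecordState initialState) {
        boolean anyArchived = false;
        for (var entry : map.subMap(startOffset, true, endOffset, false).entrySet()) {
            if (entry.getValue() == initialState) {
                entry.setValue(RecordState.ARCHIVED);
                anyArchived = true;
            }
        }
        return anyArchived;
    }
}
```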