satishd commented on a change in pull request #11390:
URL: https://github.com/apache/kafka/pull/11390#discussion_r773143045
##########
File path: core/src/main/scala/kafka/server/ReplicaFetcherThread.scala
##########
@@ -386,11 +396,76 @@ class ReplicaFetcherThread(name: String,
   }

   /**
-   * To avoid ISR thrashing, we only throttle a replica on the follower if it's in the throttled replica list,
-   * the quota is exceeded and the replica is not in sync.
+   * To avoid ISR thrashing, we only throttle a replica on the follower if it's in the throttled replica list,
+   * the quota is exceeded and the replica is not in sync.
    */
   private def shouldFollowerThrottle(quota: ReplicaQuota, fetchState: PartitionFetchState, topicPartition: TopicPartition): Boolean = {
     !fetchState.isReplicaInSync && quota.isThrottled(topicPartition) && quota.isQuotaExceeded
   }

+  override protected def buildRemoteLogAuxState(partition: TopicPartition,
+                                                currentLeaderEpoch: Int,
+                                                leaderLocalLogStartOffset: Long,
+                                                leaderLogStartOffset: Long): Unit = {
+    replicaMgr.localLog(partition).foreach(log =>
+      if (log.remoteStorageSystemEnable && log.config.remoteLogConfig.remoteStorageEnable) {
+        replicaMgr.remoteLogManager.foreach(rlm => {
+          var rlsMetadata: Optional[RemoteLogSegmentMetadata] = Optional.empty()
+          val epoch = log.leaderEpochCache.flatMap(cache => cache.epochForOffset(leaderLocalLogStartOffset))
+          if (epoch.isDefined) {
+            rlsMetadata = rlm.fetchRemoteLogSegmentMetadata(partition, epoch.get, leaderLocalLogStartOffset)
+          } else {
+            // If the epoch is not available, this broker may have lost its entire local storage, and the leader
+            // epoch cache may also need to be rebuilt. To find the remote log segment metadata for
+            // leaderLocalLogStartOffset - 1, start from the current leader epoch and keep decrementing the epoch
+            // until the metadata is found.
+            var previousLeaderEpoch = currentLeaderEpoch
+            while (!rlsMetadata.isPresent && previousLeaderEpoch >= 0) {
+              rlsMetadata = rlm.fetchRemoteLogSegmentMetadata(partition, previousLeaderEpoch, leaderLocalLogStartOffset - 1)
+              previousLeaderEpoch -= 1
+            }
+          }
+          if (rlsMetadata.isPresent) {
+            val epochStream = rlm.storageManager().fetchIndex(rlsMetadata.get(), RemoteStorageManager.IndexType.LEADER_EPOCH)
+            val epochs = readLeaderEpochCheckpoint(epochStream, log.dir)
+
+            // Truncate the existing local log before restoring the leader epoch cache and producer snapshots.
+            truncateFullyAndStartAt(partition, leaderLocalLogStartOffset)
+
+            log.maybeIncrementLogStartOffset(leaderLogStartOffset, LeaderOffsetIncremented)
+            epochs.foreach(epochEntry => {
+              log.leaderEpochCache.map(cache => cache.assign(epochEntry.epoch, epochEntry.startOffset))
+            })
+            info(s"Updated the epoch cache from remote tier till offset: $leaderLocalLogStartOffset " +
+              s"with size: ${epochs.size} for $partition")
+
+            // Restore producer snapshot
+            val snapshotFile = UnifiedLog.producerSnapshotFile(log.dir, leaderLocalLogStartOffset)
+            Files.copy(rlm.storageManager().fetchIndex(rlsMetadata.get(), RemoteStorageManager.IndexType.PRODUCER_SNAPSHOT),

Review comment:
   Filed https://issues.apache.org/jira/browse/KAFKA-13560 to add the suggested improvement.
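As an aside for readers following this change, below is a minimal, self-contained Scala sketch of the fall-back search described in the comment above, where the follower walks leader epochs downwards until remote segment metadata is found for leaderLocalLogStartOffset - 1. SegmentMetadata and fetchMetadata are hypothetical stand-ins for RemoteLogSegmentMetadata and RemoteLogManager.fetchRemoteLogSegmentMetadata; this is an illustration under those assumptions, not the actual implementation in the patch.

import java.util.Optional

object EpochFallbackSketch {

  // Hypothetical stand-in for RemoteLogSegmentMetadata, used only for illustration.
  final case class SegmentMetadata(epoch: Int, offset: Long)

  // Hypothetical lookup standing in for RemoteLogManager.fetchRemoteLogSegmentMetadata:
  // returns metadata only if the remote tier has a segment for the given epoch and offset.
  def fetchMetadata(epoch: Int, offset: Long): Optional[SegmentMetadata] =
    if (epoch <= 3) Optional.of(SegmentMetadata(epoch, offset)) else Optional.empty[SegmentMetadata]()

  // Walk the epochs downwards from the current leader epoch until a segment covering
  // leaderLocalLogStartOffset - 1 is found, mirroring the while loop in the patch above.
  def findSegmentBelowLocalLogStart(currentLeaderEpoch: Int,
                                    leaderLocalLogStartOffset: Long): Optional[SegmentMetadata] = {
    var result: Optional[SegmentMetadata] = Optional.empty[SegmentMetadata]()
    var epoch = currentLeaderEpoch
    while (!result.isPresent && epoch >= 0) {
      result = fetchMetadata(epoch, leaderLocalLogStartOffset - 1)
      epoch -= 1
    }
    result
  }

  def main(args: Array[String]): Unit = {
    // With the stub lookup above, the search tries epochs 5 and 4 (no match) and succeeds at epoch 3.
    println(findSegmentBelowLocalLogStart(currentLeaderEpoch = 5, leaderLocalLogStartOffset = 100L))
  }
}

Note that the loop is bounded below by epoch 0, so a missing segment simply yields Optional.empty() rather than looping indefinitely.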