k-apol commented on code in PR #20326: URL: https://github.com/apache/kafka/pull/20326#discussion_r2280644221
##########
streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopicManager.java:
##########
@@ -461,120 +466,119 @@ public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
         // have existed with the expected number of partitions, or some create topic returns fatal errors.
         log.debug("Starting to validate internal topics {} in partition assignor.", topics);

-        long currentWallClockMs = time.milliseconds();
+        final long currentWallClockMs = time.milliseconds();
         final long deadlineMs = currentWallClockMs + retryTimeoutMs;

-        Set<String> topicsNotReady = new HashSet<>(topics.keySet());
-        final Set<String> newlyCreatedTopics = new HashSet<>();
+        final Set<String> topicsNotReady = new HashSet<>(topics.keySet());
+        final Set<String> newTopics = new HashSet<>();

         while (!topicsNotReady.isEmpty()) {
-            final Set<String> tempUnknownTopics = new HashSet<>();
-            topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
-            newlyCreatedTopics.addAll(topicsNotReady);
-
+            final Set<NewTopic> topicsToCreate = computeTopicsToCreate(topics, topicsNotReady, newTopics);
+            if (!topicsToCreate.isEmpty()) {
+                readyTopics(topicsToCreate, topicsNotReady);
+            }
             if (!topicsNotReady.isEmpty()) {
-                final Set<NewTopic> newTopics = new HashSet<>();
+                maybeThrowTimeout(new TimeoutContext(
+                    Collections.singleton("makeReadyCheck"), // dummy collection just to trigger if `topicsNotReady` is non-empty
+                    deadlineMs,
+                    "MakeReady timeout",
+                    String.format("Could not create topics within %d milliseconds. This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs),
+                    null
+                ));

Review Comment:
   Pushing an update for this, adding the log line and backoff back (I did not mean to remove these, thanks for catching it).

   I want to back off exponentially here so that I am not spamming the broker with requests to create the topics, right? If we didn't have the backoff, we could potentially overload the broker?
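   For context, a minimal, self-contained sketch of the retry pattern the comment describes (not the actual `InternalTopicManager` code): retry topic creation until a deadline, sleeping with exponentially growing, capped, jittered pauses between attempts so the broker is not hit with back-to-back CreateTopics requests. The helper `tryCreateTopics` and the interval constants are illustrative assumptions.

   ```java
   import java.util.HashSet;
   import java.util.Set;
   import java.util.concurrent.ThreadLocalRandom;
   import java.util.concurrent.TimeoutException;

   public class BackoffRetrySketch {

       /** Hypothetical stand-in for the real create/validate call; returns the topics still not ready. */
       static Set<String> tryCreateTopics(final Set<String> topicsNotReady) {
           // In the real code this would issue the CreateTopics request and re-validate the results.
           return new HashSet<>(topicsNotReady);
       }

       static void makeReadyWithBackoff(final Set<String> topics,
                                        final long retryTimeoutMs) throws InterruptedException, TimeoutException {
           final long deadlineMs = System.currentTimeMillis() + retryTimeoutMs;
           long backoffMs = 100L;            // illustrative initial retry interval
           final long maxBackoffMs = 1_000L; // illustrative cap on the interval

           Set<String> topicsNotReady = new HashSet<>(topics);
           while (!topicsNotReady.isEmpty()) {
               topicsNotReady = tryCreateTopics(topicsNotReady);
               if (topicsNotReady.isEmpty()) {
                   return;
               }
               if (System.currentTimeMillis() + backoffMs >= deadlineMs) {
                   throw new TimeoutException("Could not create topics within " + retryTimeoutMs + " ms");
               }
               // Jittered, exponentially growing pause so retries do not hammer the broker in lock-step.
               final long jitter = ThreadLocalRandom.current().nextLong(backoffMs / 2 + 1);
               Thread.sleep(backoffMs + jitter);
               backoffMs = Math.min(backoffMs * 2, maxBackoffMs);
           }
       }
   }
   ```

   Under these assumptions, the backoff bounds the request rate to the broker between attempts, while the overall deadline still caps the total time spent waiting.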