junrao commented on a change in pull request #9364:
URL: https://github.com/apache/kafka/pull/9364#discussion_r512317288



##########
File path: core/src/test/scala/unit/kafka/log/LogTest.scala
##########
@@ -978,10 +1051,10 @@ class LogTest {
       producerIdExpirationCheckIntervalMs = 30000,
       topicPartition = Log.parseTopicPartitionName(logDir),
       producerStateManager = stateManager,
-      logDirFailureChannel = null)
+      logDirFailureChannel = null,
+      hadCleanShutdown = true)

Review comment:
       It seems we don't need to set hadCleanShutdown to true.
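
A minimal, self-contained Scala sketch of the pattern being suggested (the object and parameter names below are illustrative stand-ins, not the real `Log.apply` signature): when a newly added parameter such as `hadCleanShutdown` carries a sensible default, test call sites that do not care about it can simply leave it out.

    // Hypothetical factory that mirrors the shape of the call in LogTest;
    // names and defaults are assumptions for illustration only.
    object ExampleLog {
      def apply(dir: String,
                logDirFailureChannel: AnyRef = null,
                hadCleanShutdown: Boolean = true): String =
        s"dir=$dir, hadCleanShutdown=$hadCleanShutdown"
    }

    object ExampleLogDemo extends App {
      // Spelling out the default adds noise to the test ...
      println(ExampleLog("/tmp/kafka-logs", hadCleanShutdown = true))
      // ... while relying on the default keeps the call site unchanged.
      println(ExampleLog("/tmp/kafka-logs"))
    }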

##########
File path: core/src/test/scala/unit/kafka/log/LogTest.scala
##########
@@ -1021,10 +1092,10 @@ class LogTest {
       producerIdExpirationCheckIntervalMs = 30000,
       topicPartition = Log.parseTopicPartitionName(logDir),
       producerStateManager = stateManager,
-      logDirFailureChannel = null)
+      logDirFailureChannel = null,
+      hadCleanShutdown = true)

Review comment:
       It seems we don't need to set hadCleanShutdown to true.

##########
File path: core/src/main/scala/kafka/log/LogManager.scala
##########
@@ -298,26 +300,32 @@ class LogManager(logDirs: Seq[File],
   /**
    * Recover and load all logs in the given data directories
    */
-  private def loadLogs(): Unit = {
+  private[log] def loadLogs(): Unit = {
     info(s"Loading logs from log dirs $liveLogDirs")
     val startMs = time.hiResClockMs()
     val threadPools = ArrayBuffer.empty[ExecutorService]
     val offlineDirs = mutable.Set.empty[(String, IOException)]
-    val jobs = mutable.Map.empty[File, Seq[Future[_]]]
+    val jobs = ArrayBuffer.empty[Seq[Future[_]]]
     var numTotalLogs = 0
 
     for (dir <- liveLogDirs) {
       val logDirAbsolutePath = dir.getAbsolutePath
+      var hadCleanShutdown: Boolean = false
       try {
         val pool = Executors.newFixedThreadPool(numRecoveryThreadsPerDataDir)
         threadPools.append(pool)
 
         val cleanShutdownFile = new File(dir, Log.CleanShutdownFile)
         if (cleanShutdownFile.exists) {
           info(s"Skipping recovery for all logs in $logDirAbsolutePath since 
clean shutdown file was found")
+          // Cache the clean shutdown status and use that for the rest of the log loading workflow. Delete the CleanShutdownFile
+          // so that if the broker crashes while loading the log, it is considered a hard shutdown during the next boot up. KAFKA-10471
+          cleanShutdownFile.delete()
+          hadCleanShutdown = true
         } else {
          // log recovery itself is being performed by `Log` class during initialization
          info(s"Attempting recovery for all logs in $logDirAbsolutePath since no clean shutdown file was found")
+          hadCleanShutdown = false

Review comment:
       This line seems unnecessary since hadCleanShutdown is initialized to false.
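
A minimal, self-contained Scala sketch of the simplification implied here (the directory path is a placeholder and the snippet is not the real LogManager code): since the flag is only flipped in one branch, it can be derived directly from the file check, which makes both the `var` initialization and the redundant `hadCleanShutdown = false` assignment unnecessary.

    import java.io.File

    object CleanShutdownCheckSketch extends App {
      val dir = new File("/tmp/kafka-logs")                          // placeholder log dir
      val cleanShutdownFile = new File(dir, ".kafka_cleanshutdown")  // mirrors Log.CleanShutdownFile

      // Derive the flag once from the marker file instead of assigning it in both branches.
      val hadCleanShutdown = cleanShutdownFile.exists
      if (hadCleanShutdown) {
        // Delete the marker so that a crash while loading the logs is treated as a
        // hard shutdown on the next startup (the KAFKA-10471 behaviour described above).
        cleanShutdownFile.delete()
      }
      println(s"hadCleanShutdown = $hadCleanShutdown")
    }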




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

