Tejaskriya commented on code in PR #8264:
URL: https://github.com/apache/ozone/pull/8264#discussion_r2071152738


##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerInfoCommand.java:
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.AbstractSubcommand;
+import 
org.apache.hadoop.ozone.debug.logs.container.utils.ContainerDatanodeDatabase;
+import org.apache.hadoop.ozone.debug.logs.container.utils.SQLDBConstants;
+import picocli.CommandLine;
+
+/**
+ * Command to display detailed information of a single container by ID.
+ */
+
[email protected](
+    name = "info",
+    description = "provides complete state transition history of each replica 
for a single container along with " +

Review Comment:
   nit: Capitalize the first letter as this will be the help message seen by 
users.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerInfoCommand.java:
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.AbstractSubcommand;
+import 
org.apache.hadoop.ozone.debug.logs.container.utils.ContainerDatanodeDatabase;
+import org.apache.hadoop.ozone.debug.logs.container.utils.SQLDBConstants;
+import picocli.CommandLine;
+
+/**
+ * Command to display detailed information of a single container by ID.
+ */
+
[email protected](
+    name = "info",
+    description = "provides complete state transition history of each replica 
for a single container along with " +
+        "analysis over the container"
+)
+public class ContainerInfoCommand extends AbstractSubcommand implements 
Callable<Void> {
+
+  @CommandLine.Parameters(index = "0", description = "Container ID")
+  private Long containerId;
+
+  @CommandLine.ParentCommand
+  private ContainerLogController parent;
+
+  @Override
+  public Void call() throws Exception {
+
+    if (containerId < 0) {
+      err().println("Invalid container ID: " + containerId);
+      return null;
+    }
+
+    Path providedDbPath;
+    if (parent.getDbPath() == null) {
+      providedDbPath = Paths.get(System.getProperty("user.dir"), 
SQLDBConstants.DEFAULT_DB_FILENAME);
+
+      if (Files.exists(providedDbPath) && Files.isRegularFile(providedDbPath)) 
{
+        out().println("Using default database file found in current directory: 
" + providedDbPath);
+      } else {
+        err().println("No database path provided and default file '" + 
SQLDBConstants.DEFAULT_DB_FILENAME + "' not " +
+            "found in current directory. Please provide a valid database 
path");
+        return null;
+      }
+    } else {
+      providedDbPath = Paths.get(parent.getDbPath());
+      Path parentDir = providedDbPath.getParent();
+
+      if (parentDir != null && !Files.exists(parentDir)) {
+        err().println("The parent directory of the provided database path does 
not exist: " + parentDir);
+        return null;
+      }
+    }
+
+    ContainerDatanodeDatabase.setDatabasePath(providedDbPath.toString());
+
+    ContainerDatanodeDatabase cdd = new ContainerDatanodeDatabase();
+    try {
+      cdd.showContainerDetails(containerId);
+    } catch (Exception e) {
+      throw e;

Review Comment:
   If we are simply throwing the same exception, I don't think we need to have 
the try-catch block.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerDatanodeDatabase.java:
##########
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container.utils;
+
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.sqlite.SQLiteConfig;
+
+/**
+ * Handles creation and interaction with the database.
+ * Provides methods for table creation, log data insertion, and index setup.
+ */
+public class ContainerDatanodeDatabase {
+  
+  private static String databasePath;
+  private static final int DEFAULT_REPLICATION_FACTOR;
+  
+  static {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    final String replication = configuration.getTrimmed(
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_KEY,
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
+
+    DEFAULT_REPLICATION_FACTOR = Integer.parseInt(replication.toUpperCase());
+  }
+  
+  public static void setDatabasePath(String dbPath) {
+    if (databasePath == null) {
+      databasePath = dbPath;
+    }
+  }
+
+  private static Connection getConnection() throws Exception {
+    if (databasePath == null) {
+      throw new IllegalStateException("Database path not set");
+    }
+    
+    Class.forName(SQLDBConstants.DRIVER);
+
+    SQLiteConfig config = new SQLiteConfig();
+
+    config.setJournalMode(SQLiteConfig.JournalMode.OFF);
+    config.setCacheSize(SQLDBConstants.CACHE_SIZE);
+    config.setLockingMode(SQLiteConfig.LockingMode.EXCLUSIVE);
+    config.setSynchronous(SQLiteConfig.SynchronousMode.OFF);
+    config.setTempStore(SQLiteConfig.TempStore.MEMORY);
+
+    return DriverManager.getConnection(SQLDBConstants.CONNECTION_PREFIX + 
databasePath, config.toProperties());
+  }
+
+  public void createDatanodeContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.DATANODE_CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+      createDatanodeContainerIndex(createStmt);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);

Review Comment:
   Same as above:
   ```suggestion
          throw new RuntimeException("Unexpected error: " + e);
   ```
   We can do the same for the other occurrences in the file.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerDatanodeDatabase.java:
##########
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container.utils;
+
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.sqlite.SQLiteConfig;
+
+/**
+ * Handles creation and interaction with the database.
+ * Provides methods for table creation, log data insertion, and index setup.
+ */
+public class ContainerDatanodeDatabase {
+  
+  private static String databasePath;
+  private static final int DEFAULT_REPLICATION_FACTOR;
+  
+  static {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    final String replication = configuration.getTrimmed(
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_KEY,
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
+
+    DEFAULT_REPLICATION_FACTOR = Integer.parseInt(replication.toUpperCase());
+  }
+  
+  public static void setDatabasePath(String dbPath) {
+    if (databasePath == null) {
+      databasePath = dbPath;
+    }
+  }
+
+  private static Connection getConnection() throws Exception {
+    if (databasePath == null) {
+      throw new IllegalStateException("Database path not set");
+    }
+    
+    Class.forName(SQLDBConstants.DRIVER);
+
+    SQLiteConfig config = new SQLiteConfig();
+
+    config.setJournalMode(SQLiteConfig.JournalMode.OFF);
+    config.setCacheSize(SQLDBConstants.CACHE_SIZE);
+    config.setLockingMode(SQLiteConfig.LockingMode.EXCLUSIVE);
+    config.setSynchronous(SQLiteConfig.SynchronousMode.OFF);
+    config.setTempStore(SQLiteConfig.TempStore.MEMORY);
+
+    return DriverManager.getConnection(SQLDBConstants.CONNECTION_PREFIX + 
databasePath, config.toProperties());
+  }
+
+  public void createDatanodeContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.DATANODE_CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+      createDatanodeContainerIndex(createStmt);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Inserts a list of container log entries into the 
DatanodeContainerLogTable.
+   *
+   * @param transitionList List of container log entries to insert into the 
table.
+   */
+  
+  public synchronized void 
insertContainerDatanodeData(List<DatanodeContainerInfo> transitionList) throws 
SQLException {
+
+    String insertSQL = SQLDBConstants.INSERT_DATANODE_CONTAINER_LOG;
+
+    long containerId = 0;
+    String datanodeId = null;
+    
+    try (Connection connection = getConnection();
+         PreparedStatement preparedStatement = 
connection.prepareStatement(insertSQL)) {
+
+      int count = 0;
+
+      for (DatanodeContainerInfo info : transitionList) {
+        datanodeId = info.getDatanodeId();
+        containerId = info.getContainerId();
+
+        preparedStatement.setString(1, datanodeId);
+        preparedStatement.setLong(2, containerId);
+        preparedStatement.setString(3, info.getTimestamp());
+        preparedStatement.setString(4, info.getState());
+        preparedStatement.setLong(5, info.getBcsid());
+        preparedStatement.setString(6, info.getErrorMessage());
+        preparedStatement.setString(7, info.getLogLevel());
+        preparedStatement.setInt(8, info.getIndexValue());
+        preparedStatement.addBatch();
+
+        count++;
+
+        if (count % SQLDBConstants.BATCH_SIZE == 0) {
+          preparedStatement.executeBatch();
+          count = 0;
+        }
+      }
+
+      if (count != 0) {
+        preparedStatement.executeBatch();
+      }
+    } catch (SQLException e) {
+      System.err.println("Failed to insert container log for container " + 
containerId + " on datanode " + datanodeId);
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createDatanodeContainerIndex(Statement stmt) throws 
SQLException {
+    String createIndexSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_INDEX;
+    stmt.execute(createIndexSQL);
+  }
+
+  /**
+   * Extracts the latest container log data from the DatanodeContainerLogTable
+   * and inserts it into ContainerLogTable.
+   */
+
+  public void insertLatestContainerLogData() throws SQLException {
+    createContainerLogTable();
+    String selectSQL = SQLDBConstants.SELECT_LATEST_CONTAINER_LOG;
+    String insertSQL = SQLDBConstants.INSERT_CONTAINER_LOG;
+
+    try (Connection connection = getConnection();
+         PreparedStatement selectStmt = connection.prepareStatement(selectSQL);
+         ResultSet resultSet = selectStmt.executeQuery();
+         PreparedStatement insertStmt = 
connection.prepareStatement(insertSQL)) {
+      
+      int count = 0;
+      
+      while (resultSet.next()) {
+        String datanodeId = resultSet.getString("datanode_id");
+        long containerId = resultSet.getLong("container_id");
+        String containerState = resultSet.getString("container_state");
+        long bcsid = resultSet.getLong("bcsid");
+        try {
+          insertStmt.setString(1, datanodeId);
+          insertStmt.setLong(2, containerId);
+          insertStmt.setString(3, containerState);
+          insertStmt.setLong(4, bcsid);
+          insertStmt.addBatch();
+
+          count++;
+
+          if (count % SQLDBConstants.BATCH_SIZE == 0) {
+            insertStmt.executeBatch();
+            count = 0;
+          }
+        } catch (SQLException e) {
+          System.err.println("Failed to insert container log entry for 
container " + containerId + " on datanode "
+              + datanodeId);
+          throw e;
+        }
+      }
+
+      if (count != 0) {
+        insertStmt.executeBatch();
+      }
+    } catch (SQLException e) {
+      System.err.println("Failed to insert container log entry: " + 
e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void dropTable(String tableName, Statement stmt) throws SQLException 
{
+    String dropTableSQL = SQLDBConstants.DROP_TABLE.replace("{table_name}", 
tableName);
+    stmt.executeUpdate(dropTableSQL);
+  }
+
+  private void createContainerLogIndex(Statement stmt) throws SQLException {
+    String createIndexSQL = SQLDBConstants.CREATE_INDEX_LATEST_STATE;
+    stmt.execute(createIndexSQL);
+  }
+
+  /**
+   * Lists containers filtered by the specified state and writes their details 
to stdout 
+   * unless redirected to a file explicitly.
+   * The output includes timestamp, datanode ID, container ID, BCSID, error 
message, and index value,
+   * written in a human-readable table format to a file or console.
+   * Behavior based on the {@code limit} parameter:
+   * If {@code limit} is provided, only up to the specified number of rows are 
printed.
+   * If the number of matching containers exceeds the {@code limit},
+   * a note is printed indicating more containers exist.
+   *
+   * @param state the container state to filter by (e.g., "OPEN", "CLOSED", 
"QUASI_CLOSED")
+   * @param limit the maximum number of rows to display; use {@link 
Integer#MAX_VALUE} to fetch all rows
+   */
+
+  public void listContainersByState(String state, Integer limit) throws 
SQLException {
+    int count = 0;
+
+    boolean limitProvided = limit != Integer.MAX_VALUE;
+
+    String baseQuery = SQLDBConstants.SELECT_LATEST_CONTAINER_LOGS_BY_STATE;
+    String finalQuery = limitProvided ? baseQuery + " LIMIT ?" : baseQuery;
+
+    try (Connection connection = getConnection();
+         Statement stmt = connection.createStatement()) {
+
+      createContainerLogIndex(stmt);
+
+      try (PreparedStatement pstmt = connection.prepareStatement(finalQuery)) {
+        pstmt.setString(1, state);
+        if (limitProvided) {
+          pstmt.setInt(2, limit + 1);
+        }
+
+        try (ResultSet rs = pstmt.executeQuery();
+             PrintWriter writer = new PrintWriter(new 
OutputStreamWriter(System.out,
+                 StandardCharsets.UTF_8), true)) {
+
+          writer.printf("%-25s | %-35s | %-15s | %-15s | %-40s | %-12s%n",
+              "Timestamp", "Datanode ID", "Container ID", "BCSID", "Message", 
"Index Value");
+          
writer.println("-------------------------------------------------------------------------------------"
 +
+              
"---------------------------------------------------------------------------------------");
+
+          while (rs.next()) {
+            if (limitProvided && count >= limit) {
+              writer.println("Note: There might be more containers. Use -all 
option to list all entries");
+              break;
+            }
+            String timestamp = rs.getString("timestamp");
+            String datanodeId = rs.getString("datanode_id");
+            long containerId = rs.getLong("container_id");
+            long latestBcsid = rs.getLong("latest_bcsid");
+            String errorMessage = rs.getString("error_message");
+            int indexValue = rs.getInt("index_value");
+            count++;
+
+            writer.printf("%-25s | %-35s | %-15d | %-15d | %-40s | %-12d%n",
+                timestamp, datanodeId, containerId, latestBcsid, errorMessage, 
indexValue);
+          }
+          
+          if (count == 0) {
+            writer.printf("No containers found for state: %s%n", state);
+          } else {
+            writer.printf("Number of containers listed: %d%n", count);
+          }
+        }
+      }
+    } catch (SQLException e) {
+      throw new SQLException("Error while retrieving containers with state " + 
state);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createIdxDclContainerStateTime(Connection conn) throws 
SQLException {
+    String sql = SQLDBConstants.CREATE_DCL_CONTAINER_STATE_TIME_INDEX;
+    try (Statement stmt = conn.createStatement()) {
+      stmt.execute(sql);
+    }
+  }
+
+  /**
+   * Displays detailed information about a container based on its ID, 
including its state, BCSID, 
+   * timestamp, message, and index value. It also checks for issues such as 
UNHEALTHY 
+   * replicas, under-replication, over-replication, OPEN_UNHEALTHY, 
OUASI_CLOSED_STUCK, mismatched replication
+   * and duplicate open.
+   *
+   * @param containerID The ID of the container to display details for.
+   */
+
+  public void showContainerDetails(Long containerID) throws SQLException {
+
+    try (Connection connection = getConnection()) {
+      createIdxDclContainerStateTime(connection);
+      List<DatanodeContainerInfo> logEntries = 
getContainerLogData(containerID, connection);
+
+      if (logEntries.isEmpty()) {
+        System.out.println("Missing container with ID: " + containerID);
+        return;
+      }
+
+      System.out.printf("%-25s | %-15s | %-35s | %-20s | %-10s | %-30s | 
%-12s%n",
+          "Timestamp", "Container ID", "Datanode ID", "Container State", 
"BCSID", "Message", "Index Value");
+      
System.out.println("-----------------------------------------------------------------------------------"
 +
+          
"-------------------------------------------------------------------------------------------------");
+
+      for (DatanodeContainerInfo entry : logEntries) {
+        System.out.printf("%-25s | %-15d | %-35s | %-20s | %-10d | %-30s | 
%-12d%n",
+            entry.getTimestamp(),
+            entry.getContainerId(),
+            entry.getDatanodeId(),
+            entry.getState(),
+            entry.getBcsid(),
+            entry.getErrorMessage(),
+            entry.getIndexValue());
+      }
+
+      
logEntries.sort(Comparator.comparing(DatanodeContainerInfo::getTimestamp));
+
+      if (checkForMultipleOpenStates(logEntries)) {
+        System.out.println("Container " + containerID + " might have duplicate 
OPEN state.");
+        return;
+      }
+
+      Map<String, DatanodeContainerInfo> latestPerDatanode = new HashMap<>();
+      for (DatanodeContainerInfo entry : logEntries) {
+        String datanodeId = entry.getDatanodeId();
+        DatanodeContainerInfo existing = latestPerDatanode.get(datanodeId);
+        if (existing == null || 
entry.getTimestamp().compareTo(existing.getTimestamp()) > 0) {
+          latestPerDatanode.put(datanodeId, entry);
+        }
+      }
+
+      analyzeContainerHealth(containerID, latestPerDatanode);
+
+    } catch (SQLException e) {
+      throw new SQLException("Error while retrieving container with ID " + 
containerID);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void analyzeContainerHealth(Long containerID,
+                                      Map<String, DatanodeContainerInfo> 
latestPerDatanode) {
+
+    Set<String> lifeCycleStates = new HashSet<>();
+    for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) 
{
+      lifeCycleStates.add(state.name());
+    }
+
+    Set<String> healthStates = new HashSet<>();
+    for (ReplicationManagerReport.HealthState state : 
ReplicationManagerReport.HealthState.values()) {
+      healthStates.add(state.name());
+    }
+
+    Set<String> unhealthyReplicas = new HashSet<>();
+    Set<String> closedReplicas = new HashSet<>();
+    Set<String> openReplicas = new HashSet<>();
+    Set<String> quasiclosedReplicas = new HashSet<>();
+    Set<String> deletedReplicas = new HashSet<>();
+    Set<Long> bcsids = new HashSet<>();
+    Set<String> datanodeIds = new HashSet<>();
+    List<String> unhealthyTimestamps = new ArrayList<>();
+    List<String> closedTimestamps = new ArrayList<>();
+
+    for (DatanodeContainerInfo entry : latestPerDatanode.values()) {
+      String datanodeId = entry.getDatanodeId();
+      String state = entry.getState();
+      long bcsid = entry.getBcsid();
+      String stateTimestamp = entry.getTimestamp();
+
+      datanodeIds.add(datanodeId);
+
+
+

Review Comment:
   nit: unnecessary extra lines, can be removed



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogController.java:
##########
@@ -27,7 +27,8 @@
     name = "container",
     subcommands = {
         ContainerLogParser.class,
-        ListContainers.class
+        ListContainers.class,
+        ContainerInfoCommand.class

Review Comment:
   nit: it is better to add the subcommands in alphabetical order



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerDatanodeDatabase.java:
##########
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container.utils;
+
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.sqlite.SQLiteConfig;
+
+/**
+ * Handles creation and interaction with the database.
+ * Provides methods for table creation, log data insertion, and index setup.
+ */
+public class ContainerDatanodeDatabase {
+  
+  private static String databasePath;
+  private static final int DEFAULT_REPLICATION_FACTOR;
+  
+  static {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    final String replication = configuration.getTrimmed(
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_KEY,
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
+
+    DEFAULT_REPLICATION_FACTOR = Integer.parseInt(replication.toUpperCase());
+  }
+  
+  public static void setDatabasePath(String dbPath) {
+    if (databasePath == null) {
+      databasePath = dbPath;
+    }
+  }
+
+  private static Connection getConnection() throws Exception {
+    if (databasePath == null) {
+      throw new IllegalStateException("Database path not set");
+    }
+    
+    Class.forName(SQLDBConstants.DRIVER);
+
+    SQLiteConfig config = new SQLiteConfig();
+
+    config.setJournalMode(SQLiteConfig.JournalMode.OFF);
+    config.setCacheSize(SQLDBConstants.CACHE_SIZE);
+    config.setLockingMode(SQLiteConfig.LockingMode.EXCLUSIVE);
+    config.setSynchronous(SQLiteConfig.SynchronousMode.OFF);
+    config.setTempStore(SQLiteConfig.TempStore.MEMORY);
+
+    return DriverManager.getConnection(SQLDBConstants.CONNECTION_PREFIX + 
databasePath, config.toProperties());
+  }
+
+  public void createDatanodeContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.DATANODE_CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+      createDatanodeContainerIndex(createStmt);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Inserts a list of container log entries into the 
DatanodeContainerLogTable.
+   *
+   * @param transitionList List of container log entries to insert into the 
table.
+   */
+  
+  public synchronized void 
insertContainerDatanodeData(List<DatanodeContainerInfo> transitionList) throws 
SQLException {
+
+    String insertSQL = SQLDBConstants.INSERT_DATANODE_CONTAINER_LOG;
+
+    long containerId = 0;
+    String datanodeId = null;
+    
+    try (Connection connection = getConnection();
+         PreparedStatement preparedStatement = 
connection.prepareStatement(insertSQL)) {
+
+      int count = 0;
+
+      for (DatanodeContainerInfo info : transitionList) {
+        datanodeId = info.getDatanodeId();
+        containerId = info.getContainerId();
+
+        preparedStatement.setString(1, datanodeId);
+        preparedStatement.setLong(2, containerId);
+        preparedStatement.setString(3, info.getTimestamp());
+        preparedStatement.setString(4, info.getState());
+        preparedStatement.setLong(5, info.getBcsid());
+        preparedStatement.setString(6, info.getErrorMessage());
+        preparedStatement.setString(7, info.getLogLevel());
+        preparedStatement.setInt(8, info.getIndexValue());
+        preparedStatement.addBatch();
+
+        count++;
+
+        if (count % SQLDBConstants.BATCH_SIZE == 0) {
+          preparedStatement.executeBatch();
+          count = 0;
+        }
+      }
+
+      if (count != 0) {
+        preparedStatement.executeBatch();
+      }
+    } catch (SQLException e) {
+      System.err.println("Failed to insert container log for container " + 
containerId + " on datanode " + datanodeId);
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createDatanodeContainerIndex(Statement stmt) throws 
SQLException {
+    String createIndexSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_INDEX;
+    stmt.execute(createIndexSQL);
+  }
+
+  /**
+   * Extracts the latest container log data from the DatanodeContainerLogTable
+   * and inserts it into ContainerLogTable.
+   */
+
+  public void insertLatestContainerLogData() throws SQLException {
+    createContainerLogTable();
+    String selectSQL = SQLDBConstants.SELECT_LATEST_CONTAINER_LOG;
+    String insertSQL = SQLDBConstants.INSERT_CONTAINER_LOG;
+
+    try (Connection connection = getConnection();
+         PreparedStatement selectStmt = connection.prepareStatement(selectSQL);
+         ResultSet resultSet = selectStmt.executeQuery();
+         PreparedStatement insertStmt = 
connection.prepareStatement(insertSQL)) {
+      
+      int count = 0;
+      
+      while (resultSet.next()) {
+        String datanodeId = resultSet.getString("datanode_id");
+        long containerId = resultSet.getLong("container_id");
+        String containerState = resultSet.getString("container_state");
+        long bcsid = resultSet.getLong("bcsid");
+        try {
+          insertStmt.setString(1, datanodeId);
+          insertStmt.setLong(2, containerId);
+          insertStmt.setString(3, containerState);
+          insertStmt.setLong(4, bcsid);
+          insertStmt.addBatch();
+
+          count++;
+
+          if (count % SQLDBConstants.BATCH_SIZE == 0) {
+            insertStmt.executeBatch();
+            count = 0;
+          }
+        } catch (SQLException e) {
+          System.err.println("Failed to insert container log entry for 
container " + containerId + " on datanode "
+              + datanodeId);
+          throw e;
+        }
+      }
+
+      if (count != 0) {
+        insertStmt.executeBatch();
+      }
+    } catch (SQLException e) {
+      System.err.println("Failed to insert container log entry: " + 
e.getMessage());
+      throw e;
+    } catch (Exception e) {
+      System.err.println("Unexpected error: " + e.getMessage());
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void dropTable(String tableName, Statement stmt) throws SQLException 
{
+    String dropTableSQL = SQLDBConstants.DROP_TABLE.replace("{table_name}", 
tableName);
+    stmt.executeUpdate(dropTableSQL);
+  }
+
+  private void createContainerLogIndex(Statement stmt) throws SQLException {
+    String createIndexSQL = SQLDBConstants.CREATE_INDEX_LATEST_STATE;
+    stmt.execute(createIndexSQL);
+  }
+
+  /**
+   * Lists containers filtered by the specified state and writes their details 
to stdout 
+   * unless redirected to a file explicitly.
+   * The output includes timestamp, datanode ID, container ID, BCSID, error 
message, and index value,
+   * written in a human-readable table format to a file or console.
+   * Behavior based on the {@code limit} parameter:
+   * If {@code limit} is provided, only up to the specified number of rows are 
printed.
+   * If the number of matching containers exceeds the {@code limit},
+   * a note is printed indicating more containers exist.
+   *
+   * @param state the container state to filter by (e.g., "OPEN", "CLOSED", 
"QUASI_CLOSED")
+   * @param limit the maximum number of rows to display; use {@link 
Integer#MAX_VALUE} to fetch all rows
+   */
+
+  public void listContainersByState(String state, Integer limit) throws 
SQLException {
+    int count = 0;
+
+    boolean limitProvided = limit != Integer.MAX_VALUE;
+
+    String baseQuery = SQLDBConstants.SELECT_LATEST_CONTAINER_LOGS_BY_STATE;
+    String finalQuery = limitProvided ? baseQuery + " LIMIT ?" : baseQuery;
+
+    try (Connection connection = getConnection();
+         Statement stmt = connection.createStatement()) {
+
+      createContainerLogIndex(stmt);
+
+      try (PreparedStatement pstmt = connection.prepareStatement(finalQuery)) {
+        pstmt.setString(1, state);
+        if (limitProvided) {
+          pstmt.setInt(2, limit + 1);
+        }
+
+        try (ResultSet rs = pstmt.executeQuery();
+             PrintWriter writer = new PrintWriter(new 
OutputStreamWriter(System.out,
+                 StandardCharsets.UTF_8), true)) {
+
+          writer.printf("%-25s | %-35s | %-15s | %-15s | %-40s | %-12s%n",
+              "Timestamp", "Datanode ID", "Container ID", "BCSID", "Message", 
"Index Value");
+          
writer.println("-------------------------------------------------------------------------------------"
 +
+              
"---------------------------------------------------------------------------------------");
+
+          while (rs.next()) {
+            if (limitProvided && count >= limit) {
+              writer.println("Note: There might be more containers. Use -all 
option to list all entries");
+              break;
+            }
+            String timestamp = rs.getString("timestamp");
+            String datanodeId = rs.getString("datanode_id");
+            long containerId = rs.getLong("container_id");
+            long latestBcsid = rs.getLong("latest_bcsid");
+            String errorMessage = rs.getString("error_message");
+            int indexValue = rs.getInt("index_value");
+            count++;
+
+            writer.printf("%-25s | %-35s | %-15d | %-15d | %-40s | %-12d%n",
+                timestamp, datanodeId, containerId, latestBcsid, errorMessage, 
indexValue);
+          }
+          
+          if (count == 0) {
+            writer.printf("No containers found for state: %s%n", state);
+          } else {
+            writer.printf("Number of containers listed: %d%n", count);
+          }
+        }
+      }
+    } catch (SQLException e) {
+      throw new SQLException("Error while retrieving containers with state " + 
state);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void createIdxDclContainerStateTime(Connection conn) throws 
SQLException {
+    String sql = SQLDBConstants.CREATE_DCL_CONTAINER_STATE_TIME_INDEX;
+    try (Statement stmt = conn.createStatement()) {
+      stmt.execute(sql);
+    }
+  }
+
+  /**
+   * Displays detailed information about a container based on its ID, 
including its state, BCSID, 
+   * timestamp, message, and index value. It also checks for issues such as 
UNHEALTHY 
+   * replicas, under-replication, over-replication, OPEN_UNHEALTHY, 
QUASI_CLOSED_STUCK, mismatched replication
+   * and duplicate open.
+   *
+   * @param containerID The ID of the container to display details for.
+   */
+
+  public void showContainerDetails(Long containerID) throws SQLException {
+
+    try (Connection connection = getConnection()) {
+      createIdxDclContainerStateTime(connection);
+      List<DatanodeContainerInfo> logEntries = 
getContainerLogData(containerID, connection);
+
+      if (logEntries.isEmpty()) {
+        System.out.println("Missing container with ID: " + containerID);
+        return;
+      }
+
+      System.out.printf("%-25s | %-15s | %-35s | %-20s | %-10s | %-30s | 
%-12s%n",

Review Comment:
   A suggestion w.r.t. using System.out for printing the results. Can we pass a 
PrintStream or a PrintWriter to this class/method, and use that to print 
everything? Maybe add it in the constructor itself.
   Even for `listContainersByState()`, while creating the PrintWriter the same 
can be used. As a fallback, we can use System.out as the default.
   
   That would give some flexibility. 



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogParser.java:
##########


Review Comment:
   Thanks for making the changes as suggested in the other PR!



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/utils/ContainerDatanodeDatabase.java:
##########
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container.utils;
+
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.sqlite.SQLiteConfig;
+
+/**
+ * Handles creation and interaction with the database.
+ * Provides methods for table creation, log data insertion, and index setup.
+ */
+public class ContainerDatanodeDatabase {
+  
+  private static String databasePath;
+  private static final int DEFAULT_REPLICATION_FACTOR;
+  
+  static {
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    final String replication = configuration.getTrimmed(
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_KEY,
+        OMConfigKeys.OZONE_SERVER_DEFAULT_REPLICATION_DEFAULT);
+
+    DEFAULT_REPLICATION_FACTOR = Integer.parseInt(replication.toUpperCase());
+  }
+  
+  public static void setDatabasePath(String dbPath) {
+    if (databasePath == null) {
+      databasePath = dbPath;
+    }
+  }
+
+  private static Connection getConnection() throws Exception {
+    if (databasePath == null) {
+      throw new IllegalStateException("Database path not set");
+    }
+    
+    Class.forName(SQLDBConstants.DRIVER);
+
+    SQLiteConfig config = new SQLiteConfig();
+
+    config.setJournalMode(SQLiteConfig.JournalMode.OFF);
+    config.setCacheSize(SQLDBConstants.CACHE_SIZE);
+    config.setLockingMode(SQLiteConfig.LockingMode.EXCLUSIVE);
+    config.setSynchronous(SQLiteConfig.SynchronousMode.OFF);
+    config.setTempStore(SQLiteConfig.TempStore.MEMORY);
+
+    return DriverManager.getConnection(SQLDBConstants.CONNECTION_PREFIX + 
databasePath, config.toProperties());
+  }
+
+  public void createDatanodeContainerLogTable() throws SQLException {
+    String createTableSQL = SQLDBConstants.CREATE_DATANODE_CONTAINER_LOG_TABLE;
+    try (Connection connection = getConnection();
+         Statement dropStmt = connection.createStatement();
+         Statement createStmt = connection.createStatement()) {
+      dropTable(SQLDBConstants.DATANODE_CONTAINER_LOG_TABLE_NAME, dropStmt);
+      createStmt.execute(createTableSQL);
+      createDatanodeContainerIndex(createStmt);
+    } catch (SQLException e) {
+      System.err.println("Error while creating the table: " + e.getMessage());

Review Comment:
   It would be better to wrap the exception with a custom message, and throw it 
(instead of printing the error and then throwing the exception again). 
   ```suggestion
         throw new SQLException("Error while creating the table: " + 
e.getMessage());
   ```
   As this is now a part of the utils package, any error message printing being 
handled in the command would be cleaner.
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to