ArafatKhan2198 commented on code in PR #8995:
URL: https://github.com/apache/ozone/pull/8995#discussion_r2350002453


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java:
##########
@@ -875,4 +886,105 @@ public static String constructObjectPathWithPrefix(long... ids) {
     }
     return pathBuilder.toString();
   }
+
+  private static HttpURLConnection makeHttpGetCall(String urlString) throws IOException {
+    Objects.requireNonNull(urlString, "urlString");
+    URL url = new URL(urlString);
+    final HttpURLConnection conn = openURLConnection(url);
+    conn.setRequestMethod("GET");
+    conn.setConnectTimeout(HTTP_TIMEOUT_MS);
+    conn.setReadTimeout(HTTP_TIMEOUT_MS);
+    conn.setRequestProperty("Accept", "application/json");
+    return conn;
+  }
+
+  private static HttpURLConnection openURLConnection(URL url) throws IOException {

Review Comment:
   Each JMX call opens a new HTTP connection.
   If we have hundreds of DNs, would it be a good idea to keep a connection pool and reuse connections?
   
   As it stands, every time a user loads this page, the storage endpoint opens a fresh batch of connections.
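   
   Something like this could work, assuming Apache HttpClient 4.x is already on the Recon classpath (untested sketch; the pool limits are just illustrative):
   
   ```
   import org.apache.http.client.config.RequestConfig;
   import org.apache.http.impl.client.CloseableHttpClient;
   import org.apache.http.impl.client.HttpClients;
   import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
   
   // One shared, pooled client instead of a new HttpURLConnection per JMX call.
   private static final CloseableHttpClient HTTP_CLIENT;
   
   static {
     PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
     cm.setMaxTotal(200);            // total connections across all DNs
     cm.setDefaultMaxPerRoute(4);    // connections per DN
     RequestConfig config = RequestConfig.custom()
         .setConnectTimeout(HTTP_TIMEOUT_MS)
         .setSocketTimeout(HTTP_TIMEOUT_MS)
         .build();
     HTTP_CLIENT = HttpClients.custom()
         .setConnectionManager(cm)
         .setDefaultRequestConfig(config)
         .build();
   }
   ```
   
   With keep-alive, repeated page loads would then reuse sockets to the same DN instead of opening new ones each time.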



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+  private final ReconNodeManager nodeManager;
+  private final OMDBInsightEndpoint omdbInsightEndpoint;
+  private final NSSummaryEndpoint nsSummaryEndpoint;
+  private final StorageContainerLocationProtocol scmClient;
+  private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+  private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();

Review Comment:
   `blockDeletionMetricsMap` is accessed concurrently without synchronization.
   Can we use a `ConcurrentHashMap`?
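   
   Something like this (and making the field `final` while at it, since it is never reassigned):
   
   ```
   private final Map<DatanodeDetails, Long> blockDeletionMetricsMap =
       new ConcurrentHashMap<>();
   ```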



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java:
##########
@@ -21,17 +21,21 @@
  * Metadata object that contains storage report of a Datanode.
  */
 public class DatanodeStorageReport {
+  private String dataNodeId;
   private long capacity;
   private long used;
   private long remaining;
   private long committed;
+  private long pendingDeletion;
 
-  public DatanodeStorageReport(long capacity, long used, long remaining,
-                               long committed) {
+  public DatanodeStorageReport(String dataNodeId, long capacity, long used, long remaining,
+                               long committed, long pendingDeletion) {
+    this.dataNodeId = dataNodeId;
     this.capacity = capacity;
     this.used = used;
     this.remaining = remaining;
     this.committed = committed;
+    this.pendingDeletion = pendingDeletion;

Review Comment:
   Quick question: is there any possibility of these values going negative?
   
   If there is, we should add a validation method, something like this, that would surface these problems to the caller:
   
   ```
   public void validate() {
       Objects.requireNonNull(dataNodeId, "dataNodeId cannot be null");
       
       if (capacity < 0) {
           throw new IllegalArgumentException("capacity cannot be negative");
       }
       if (used < 0) {
           throw new IllegalArgumentException("used cannot be negative");
       }
       if (remaining < 0) {
           throw new IllegalArgumentException("remaining cannot be negative");
       }
       if (committed < 0) {
           throw new IllegalArgumentException("committed cannot be negative");
       }
       if (pendingDeletion < 0) {
           throw new IllegalArgumentException("pendingDeletion cannot be negative");
       }
       
       // Logical consistency check
       if (used + remaining > capacity) {
           log.warn("Inconsistent storage report for {}: used({}) + remaining({}) > capacity({})",
                   dataNodeId, used, remaining, capacity);
       }
   }
   ```
   
   But if these values can never go negative, then it's all good!
   I mainly had doubts about the newly added `pendingDeletion` field.



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestStorageDistributionEndpoint.java:
##########
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.LongMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Test class for StorageDistributionEndpoint, responsible for testing
+ * the behavior and responses of the storage distribution endpoint in
+ * different scenarios, including successful responses and exception cases.
+ */
+class StorageDistributionEndpointTest {
+
+  private ReconNodeManager mockNodeManager;
+  private OMDBInsightEndpoint mockOmdbInsightEndpoint;
+  private NSSummaryEndpoint mockNsSummaryEndpoint;
+  private StorageContainerLocationProtocol mockScmClient;
+  private GlobalStatsDao globalStatsDao;
+  private OzoneStorageContainerManager mockReconScm;
+  private DatanodeInfo datanodeDetails;
+  private SCMNodeStat mockNodeStat;
+  private SCMNodeStat globalStats;
+  private DUResponse mockDuResponse;
+
+  @Test
+  void testGetStorageDistributionSuccessfulResponse() throws IOException {
+    setupMockDependencies();
+    setupSuccessfulScenario();
+    StorageDistributionEndpoint endpoint = new StorageDistributionEndpoint(mockReconScm, mockOmdbInsightEndpoint,
+        mockNsSummaryEndpoint, globalStatsDao, mockScmClient);
+    Response response = endpoint.getStorageDistribution();
+
+    assertNotNull(response);
+    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
+    StorageCapacityDistributionResponse responsePayload = (StorageCapacityDistributionResponse) response.getEntity();
+    assertNotNull(responsePayload);
+
+    GlobalStorageReport globalStorage = responsePayload.getGlobalStorage();
+    assertEquals(2000L, globalStorage.getTotalUsedSpace());
+    assertEquals(3000L, globalStorage.getTotalFreeSpace());
+    assertEquals(5000L, globalStorage.getTotalCapacity());

Review Comment:
   Magic numbers without context.
   Problem: it's hard to understand what these values represent or why they're expected.
   Fix: use named constants and calculation verification, like we do in a lot of other test classes.
   
   ```
     public static final long NODE_CAPACITY = 5000L;
     public static final long NODE_USED = 2000L;
     public static final long NODE_FREE = 3000L;
     public static final long OPEN_KEYS_SIZE = 150L;
     public static final long COMMITTED_SIZE = 300L;
     public static final long OM_PENDING_DIR = 25L;
     public static final long OM_PENDING_KEY = 25L;
     public static final long SCM_PENDING = 75L;
     
     // Calculated expectations
     public static final long EXPECTED_OM_TOTAL = OM_PENDING_DIR + OM_PENDING_KEY;
     public static final long EXPECTED_TOTAL_PENDING = EXPECTED_OM_TOTAL + SCM_PENDING;
   ```



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java:
##########
@@ -21,17 +21,21 @@
  * Metadata object that contains storage report of a Datanode.
  */
 public class DatanodeStorageReport {
+  private String dataNodeId;
   private long capacity;
   private long used;
   private long remaining;
   private long committed;
+  private long pendingDeletion;

Review Comment:
   Not a critical issue, but something we can take up later:
   the `DatanodeStorageReport` constructor takes 6 parameters, 5 of them of the same type (`long`), which makes parameter-order mistakes easy.
   
   Fix: we could use the Builder pattern for this class (rough sketch below), but like I said, this can be taken up later; not critical.
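   
   Roughly this shape (setter names are illustrative; it just delegates to the existing constructor):
   
   ```
   public static final class Builder {
     private String dataNodeId;
     private long capacity;
     private long used;
     private long remaining;
     private long committed;
     private long pendingDeletion;
   
     public Builder setDataNodeId(String dataNodeId) {
       this.dataNodeId = dataNodeId;
       return this;
     }
   
     public Builder setCapacity(long capacity) {
       this.capacity = capacity;
       return this;
     }
   
     // ... same pattern for used, remaining, committed, pendingDeletion ...
   
     public DatanodeStorageReport build() {
       return new DatanodeStorageReport(dataNodeId, capacity, used,
           remaining, committed, pendingDeletion);
     }
   }
   ```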



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+  private final ReconNodeManager nodeManager;
+  private final OMDBInsightEndpoint omdbInsightEndpoint;
+  private final NSSummaryEndpoint nsSummaryEndpoint;
+  private final StorageContainerLocationProtocol scmClient;
+  private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+  private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();
+  private GlobalStatsDao globalStatsDao;
+
+  @Inject
+  public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
+                                     OMDBInsightEndpoint omDbInsightEndpoint,
+                                     NSSummaryEndpoint nsSummaryEndpoint,
+                                     GlobalStatsDao globalStatsDao,
+                                     StorageContainerLocationProtocol scmClient) {
+    this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
+    this.omdbInsightEndpoint = omDbInsightEndpoint;
+    this.nsSummaryEndpoint = nsSummaryEndpoint;
+    this.scmClient = scmClient;
+    this.globalStatsDao = globalStatsDao;
+  }
+
+  @GET
+  public Response getStorageDistribution() {
+    try {
+      initializeBlockDeletionMetricsMap();
+      List<DatanodeStorageReport> nodeStorageReports = collectDatanodeReports();
+      GlobalStorageReport globalStorageReport = calculateGlobalStorageReport();
+
+      Map<String, Long> namespaceMetrics = new HashMap<>();
+      try {
+        namespaceMetrics = calculateNamespaceMetrics();
+      } catch (Exception e) {
+        log.error("Error calculating namespace metrics", e);
+        // Initialize with default values
+        namespaceMetrics.put("totalUsedNamespace", 0L);
+        namespaceMetrics.put("totalOpenKeySize", 0L);
+        namespaceMetrics.put("totalCommittedSize", 0L);
+        namespaceMetrics.put("pendingDirectorySize", 0L);
+        namespaceMetrics.put("pendingKeySize", 0L);
+        namespaceMetrics.put("totalKeys", 0L);
+      }
+
+      StorageCapacityDistributionResponse response = buildStorageDistributionResponse(
+              nodeStorageReports, globalStorageReport, namespaceMetrics);
+      return Response.ok(response).build();
+    } catch (Exception e) {
+      log.error("Error getting storage distribution", e);
+      return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+              .entity("Error retrieving storage distribution: " + e.getMessage())
+              .build();
+    }
+  }

Review Comment:
   No user-facing timeout.
   Users calling the API have no idea how long it might take or when to expect a timeout.
   We could implement something like this, if possible:
   
   ```
       try {
           StorageCapacityDistributionResponse response =
               future.get(60, TimeUnit.SECONDS); // 1 minute API timeout
           return Response.ok(response).build();
       } catch (TimeoutException e) {
           return Response.status(Response.Status.REQUEST_TIMEOUT)
               .entity("Storage distribution collection timed out")
               .build();
       }
   ```
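   
   For context, `future` above could come from something like this (`collectStorageDistribution` and `executor` are hypothetical names for the existing collection logic and a shared thread pool):
   
   ```
   CompletableFuture<StorageCapacityDistributionResponse> future =
       CompletableFuture.supplyAsync(this::collectStorageDistribution, executor);
   ```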



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestStorageDistributionEndpoint.java:
##########
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.LongMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Test class for StorageDistributionEndpoint, responsible for testing
+ * the behavior and responses of the storage distribution endpoint in
+ * different scenarios, including successful responses and exception cases.
+ */
+class StorageDistributionEndpointTest {
+
+  private ReconNodeManager mockNodeManager;
+  private OMDBInsightEndpoint mockOmdbInsightEndpoint;
+  private NSSummaryEndpoint mockNsSummaryEndpoint;
+  private StorageContainerLocationProtocol mockScmClient;
+  private GlobalStatsDao globalStatsDao;
+  private OzoneStorageContainerManager mockReconScm;
+  private DatanodeInfo datanodeDetails;
+  private SCMNodeStat mockNodeStat;
+  private SCMNodeStat globalStats;
+  private DUResponse mockDuResponse;
+
+  @Test
+  void testGetStorageDistributionSuccessfulResponse() throws IOException {
+    setupMockDependencies();
+    setupSuccessfulScenario();
+    StorageDistributionEndpoint endpoint = new StorageDistributionEndpoint(mockReconScm, mockOmdbInsightEndpoint,
+        mockNsSummaryEndpoint, globalStatsDao, mockScmClient);
+    Response response = endpoint.getStorageDistribution();
+
+    assertNotNull(response);
+    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
+    StorageCapacityDistributionResponse responsePayload = (StorageCapacityDistributionResponse) response.getEntity();
+    assertNotNull(responsePayload);
+
+    GlobalStorageReport globalStorage = responsePayload.getGlobalStorage();
+    assertEquals(2000L, globalStorage.getTotalUsedSpace());
+    assertEquals(3000L, globalStorage.getTotalFreeSpace());
+    assertEquals(5000L, globalStorage.getTotalCapacity());
+
+    GlobalNamespaceReport namespaceReport = responsePayload.getGlobalNamespace();
+    assertEquals(500L, namespaceReport.getTotalUsedSpace());
+
+    UsedSpaceBreakDown usedSpaceBreakDown = responsePayload.getUsedSpaceBreakDown();
+    assertEquals(150L, usedSpaceBreakDown.getOpenKeysBytes());
+    assertEquals(300L, usedSpaceBreakDown.getCommittedBytes());
+
+    DeletionPendingBytesByStage deletionBreakdown = usedSpaceBreakDown.getDeletionPendingBytesByStage();
+    assertEquals(50L, deletionBreakdown.getByStage().get("OM").get("totalBytes"));
+    assertEquals(75L, deletionBreakdown.getByStage().get("SCM").get("pendingBytes"));
+    assertEquals(0L, deletionBreakdown.getByStage().get("DN").get("pendingBytes"));
+  }
+
+  @Test
+  void testGetStorageDistributionWithSCMExceptionResponse() throws IOException {
+    setupMockDependencies();
+    setupScmExceptionScenario();
+    StorageDistributionEndpoint endpoint = new StorageDistributionEndpoint(mockReconScm, mockOmdbInsightEndpoint,
+        mockNsSummaryEndpoint, globalStatsDao, mockScmClient);
+    Response response = endpoint.getStorageDistribution();
+    assertNotNull(response);
+    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
+    StorageCapacityDistributionResponse responsePayload = (StorageCapacityDistributionResponse) response.getEntity();
+    assertNotNull(responsePayload);
+    GlobalStorageReport globalStorage = responsePayload.getGlobalStorage();
+    assertEquals(2000L, globalStorage.getTotalUsedSpace());
+    assertEquals(3000L, globalStorage.getTotalFreeSpace());
+    assertEquals(5000L, globalStorage.getTotalCapacity());
+
+    GlobalNamespaceReport namespaceReport = responsePayload.getGlobalNamespace();
+    assertEquals(500L, namespaceReport.getTotalUsedSpace());
+
+    UsedSpaceBreakDown usedSpaceBreakDown = responsePayload.getUsedSpaceBreakDown();
+    assertEquals(150L, usedSpaceBreakDown.getOpenKeysBytes());
+    assertEquals(300L, usedSpaceBreakDown.getCommittedBytes());
+
+    DeletionPendingBytesByStage deletionBreakdown = usedSpaceBreakDown.getDeletionPendingBytesByStage();
+    assertEquals(50L, deletionBreakdown.getByStage().get("OM").get("totalBytes"));
+    assertEquals(0, deletionBreakdown.getByStage().get("SCM").get("pendingBytes"));
+  }
+

Review Comment:
   We can always test this locally by shutting down a datanode so the JMX call from Recon to the DN times out.
   
   But later on we should add a small test here that simulates that scenario:
   
   ```
   @Test
   void testStorageDistributionWithJMXTimeouts() {
       // Test behavior when JMX calls time out
       // Verify partial results are returned
       // Test timeout configuration
   }
   ```
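   
   One possible shape for it (untested sketch; assumes `mockito-inline` is available for static mocking, plus `MockedStatic`/`mockStatic` imports, and that `ReconUtils.getMetricsFromDatanode` surfaces a timeout as an `IOException`):
   
   ```
   @Test
   void testStorageDistributionWithJmxTimeout() throws IOException {
     setupMockDependencies();
     setupSuccessfulScenario();
     // Simulate every JMX call from Recon to a DN timing out.
     try (MockedStatic<ReconUtils> reconUtils = mockStatic(ReconUtils.class)) {
       reconUtils.when(() -> ReconUtils.getMetricsFromDatanode(
               any(), any(), any(), any()))
           .thenThrow(new IOException("connect timed out"));
   
       StorageDistributionEndpoint endpoint = new StorageDistributionEndpoint(
           mockReconScm, mockOmdbInsightEndpoint, mockNsSummaryEndpoint,
           globalStatsDao, mockScmClient);
       Response response = endpoint.getStorageDistribution();
   
       // Partial results should still come back; DN-side pending bytes fall back to 0.
       assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
     }
   }
   ```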



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/StorageCapacityDistributionResponse.java:
##########
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+
+/**
+ * Represents the response structure for storage capacity distribution in the system.
+ * Provides aggregated information about global storage, namespace, space usage breakdown,
+ * and individual data node storage reports.

Review Comment:
   Since we are no longer using the storage reports, do you think we should update the Javadoc here as well?
   



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java:
##########
@@ -49,4 +53,12 @@ public long getRemaining() {
   public long getCommitted() {
     return committed;
   }
+
+  public long getPendingDeletion() {
+    return pendingDeletion;
+  }

Review Comment:
   Btw, we are going with the metrics approach now, right?
   Why are we still adding this to the DN storage report?



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+  private final ReconNodeManager nodeManager;
+  private final OMDBInsightEndpoint omdbInsightEndpoint;
+  private final NSSummaryEndpoint nsSummaryEndpoint;
+  private final StorageContainerLocationProtocol scmClient;
+  private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+  private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();
+  private GlobalStatsDao globalStatsDao;
+
+  @Inject
+  public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
+                                     OMDBInsightEndpoint omDbInsightEndpoint,
+                                     NSSummaryEndpoint nsSummaryEndpoint,
+                                     GlobalStatsDao globalStatsDao,
+                                     StorageContainerLocationProtocol scmClient) {
+    this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
+    this.omdbInsightEndpoint = omDbInsightEndpoint;
+    this.nsSummaryEndpoint = nsSummaryEndpoint;
+    this.scmClient = scmClient;
+    this.globalStatsDao = globalStatsDao;
+  }
+
+  @GET
+  public Response getStorageDistribution() {
+    try {
+      initializeBlockDeletionMetricsMap();
+      List<DatanodeStorageReport> nodeStorageReports = collectDatanodeReports();
+      GlobalStorageReport globalStorageReport = calculateGlobalStorageReport();
+
+      Map<String, Long> namespaceMetrics = new HashMap<>();
+      try {
+        namespaceMetrics = calculateNamespaceMetrics();
+      } catch (Exception e) {
+        log.error("Error calculating namespace metrics", e);
+        // Initialize with default values
+        namespaceMetrics.put("totalUsedNamespace", 0L);
+        namespaceMetrics.put("totalOpenKeySize", 0L);
+        namespaceMetrics.put("totalCommittedSize", 0L);
+        namespaceMetrics.put("pendingDirectorySize", 0L);
+        namespaceMetrics.put("pendingKeySize", 0L);
+        namespaceMetrics.put("totalKeys", 0L);
+      }
+
+      StorageCapacityDistributionResponse response = buildStorageDistributionResponse(
+              nodeStorageReports, globalStorageReport, namespaceMetrics);
+      return Response.ok(response).build();
+    } catch (Exception e) {
+      log.error("Error getting storage distribution", e);
+      return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+              .entity("Error retrieving storage distribution: " + e.getMessage())
+              .build();
+    }
+  }
+
+  private GlobalStorageReport calculateGlobalStorageReport() {
+    try {
+      SCMNodeStat stats = nodeManager.getStats();
+      if (stats == null) {
+        log.warn("Node manager stats are null, returning default values");
+        return new GlobalStorageReport(0L, 0L, 0L);
+      }
+
+      long scmUsed = stats.getScmUsed() != null ? stats.getScmUsed().get() : 0L;
+      long remaining = stats.getRemaining() != null ? stats.getRemaining().get() : 0L;
+      long capacity = stats.getCapacity() != null ? stats.getCapacity().get() : 0L;
+
+      return new GlobalStorageReport(scmUsed, remaining, capacity);
+    } catch (Exception e) {
+      log.error("Error calculating global storage report", e);
+      return new GlobalStorageReport(0L, 0L, 0L);
+    }
+  }
+
+  private Map<String, Long> calculateNamespaceMetrics() {
+    Map<String, Long> metrics = new HashMap<>();
+    Map<String, Long> totalPendingAtOmSide = calculatePendingSizes();
+    long totalOpenKeySize = calculateOpenKeySizes();
+    long totalCommittedSize = calculateCommittedSize();
+    long pendingDirectorySize = totalPendingAtOmSide.getOrDefault("pendingDirectorySize", 0L);
+    long pendingKeySize = totalPendingAtOmSide.getOrDefault("pendingKeySize", 0L);
+    long totalUsedNamespace = pendingDirectorySize + pendingKeySize + totalOpenKeySize + totalCommittedSize;
+
+    long totalKeys = 0L;
+    // Keys from OBJECT_STORE buckets.
+    GlobalStats keyRecord = globalStatsDao.findById(
+            OmTableInsightTask.getTableCountKeyFromTable(KEY_TABLE));
+    // Keys from FILE_SYSTEM_OPTIMIZED buckets
+    GlobalStats fileRecord = globalStatsDao.findById(
+            OmTableInsightTask.getTableCountKeyFromTable(FILE_TABLE));
+    if (keyRecord != null) {
+      totalKeys += keyRecord.getValue();
+    }
+    if (fileRecord != null) {
+      totalKeys += fileRecord.getValue();
+    }
+
+    metrics.put("pendingDirectorySize", pendingDirectorySize);
+    metrics.put("pendingKeySize", pendingKeySize);
+    metrics.put("totalOpenKeySize", totalOpenKeySize);
+    metrics.put("totalCommittedSize", totalCommittedSize);
+    metrics.put("totalUsedNamespace", totalUsedNamespace);
+    metrics.put("totalKeys", totalKeys);
+    return metrics;
+  }
+
+  private StorageCapacityDistributionResponse buildStorageDistributionResponse(
+          List<DatanodeStorageReport> nodeStorageReports,
+          GlobalStorageReport storageMetrics,
+          Map<String, Long> namespaceMetrics) {
+    DeletedBlocksTransactionSummary scmSummary = null;
+    try {
+      scmSummary = scmClient.getDeletedBlockSummary();
+    } catch (IOException e) {
+      log.error("Failed to get deleted block summary from SCM", e);
+    }
+
+    long totalPendingAtDnSide = 0L;
+    try {
+      totalPendingAtDnSide = blockDeletionMetricsMap.values().stream().reduce(0L, Long::sum);
+    } catch (Exception e) {
+      log.error("Error calculating pending deletion metrics", e);
+    }
+
+    DeletionPendingBytesByStage deletionPendingBytesByStage =
+            createDeletionPendingBytesByStage(
+                    namespaceMetrics.getOrDefault("pendingDirectorySize", 0L),
+                    namespaceMetrics.getOrDefault("pendingKeySize", 0L),
+                    scmSummary != null ? scmSummary.getTotalBlockReplicatedSize() : 0L,
+                    totalPendingAtDnSide);
+
+    // Safely get values from namespaceMetrics with null checks
+    Long totalUsedNamespace = namespaceMetrics.get("totalUsedNamespace");
+    Long totalOpenKeySize = namespaceMetrics.get("totalOpenKeySize");
+    Long totalCommittedSize = namespaceMetrics.get("totalCommittedSize");
+    Long totalKeys = namespaceMetrics.get("totalKeys");
+    Long totalContainerPreAllocated = nodeStorageReports.stream()
+        .map(report -> report.getCommitted())
+        .reduce(0L, Long::sum);
+
+    return StorageCapacityDistributionResponse.newBuilder()
+            .setDataNodeUsage(nodeStorageReports)
+            .setGlobalStorage(storageMetrics)
+            .setGlobalNamespace(new GlobalNamespaceReport(
+                    totalUsedNamespace != null ? totalUsedNamespace : 0L,
+                    totalKeys != null ? totalKeys : 0L))
+            .setUsedSpaceBreakDown(new UsedSpaceBreakDown(
+                    totalOpenKeySize != null ? totalOpenKeySize : 0L,
+                    totalCommittedSize != null ? totalCommittedSize : 0L,
+                    totalContainerPreAllocated != null ? totalContainerPreAllocated : 0L,
+                    deletionPendingBytesByStage))
+            .build();
+  }
+
+  private List<DatanodeStorageReport> collectDatanodeReports() {
+    return nodeManager.getAllNodes().stream()
+        .map(this::getStorageReport)
+        .filter(report -> report != null) // Filter out null reports
+        .collect(Collectors.toList());
+  }
+
+  private Map<String, Long> calculatePendingSizes() {
+    Map<String, Long> result = new HashMap<>();
+    Map<String, Long> pendingDeletedDirSizes = new HashMap<>();
+    omdbInsightEndpoint.calculateTotalPendingDeletedDirSizes(pendingDeletedDirSizes);
+    Map<String, Long> pendingKeySize = new HashMap<>();
+    omdbInsightEndpoint.createKeysSummaryForDeletedKey(pendingKeySize);
+    result.put("pendingDirectorySize", 
pendingDeletedDirSizes.getOrDefault("totalReplicatedDataSize", 0L));
+    result.put("pendingKeySize", 
pendingKeySize.getOrDefault("totalReplicatedDataSize", 0L));
+    return result;
+  }
+
+  private long calculateOpenKeySizes() {
+    Map<String, Long> openKeySummary = new HashMap<>();
+    omdbInsightEndpoint.createKeysSummaryForOpenKey(openKeySummary);
+    Map<String, Long> openKeyMPUSummary = new HashMap<>();
+    omdbInsightEndpoint.createKeysSummaryForOpenMPUKey(openKeyMPUSummary);
+    long openKeyDataSize = openKeySummary.getOrDefault("totalReplicatedDataSize", 0L);
+    long totalMPUKeySize = openKeyMPUSummary.getOrDefault("totalReplicatedDataSize", 0L);
+    return openKeyDataSize + totalMPUKeySize;
+  }
+
+  private long calculateCommittedSize() {
+    try {
+      Response rootResponse = nsSummaryEndpoint.getDiskUsage("/", false, true, false);
+      if (rootResponse.getStatus() != Response.Status.OK.getStatusCode()) {
+        log.warn("Failed to get disk usage, status: {}", 
rootResponse.getStatus());
+        return 0L;
+      }
+      DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
+      return duRootRes != null ? duRootRes.getSizeWithReplica() : 0L;
+    } catch (IOException e) {
+      log.error("IOException while calculating committed size", e);
+      return 0L;
+    }
+  }
+
+  private DeletionPendingBytesByStage createDeletionPendingBytesByStage(long pendingDirectorySize,
+                                                                        long pendingKeySize,
+                                                                        long totalPendingAtScmSide,
+                                                                        long totalPendingAtDnSide) {
+    long totalPending = pendingDirectorySize + pendingKeySize + totalPendingAtScmSide + totalPendingAtDnSide;
+    Map<String, Map<String, Long>> stageItems = new HashMap<>();
+    Map<String, Long> omMap = new HashMap<>();
+    omMap.put("totalBytes", pendingDirectorySize + pendingKeySize);
+    omMap.put("pendingDirectoryBytes", pendingDirectorySize);
+    omMap.put("pendingKeyBytes", pendingKeySize);
+    Map<String, Long> scmMap = new HashMap<>();
+    scmMap.put("pendingBytes", totalPendingAtScmSide);
+    Map<String, Long> dnMap = new HashMap<>();
+    dnMap.put("pendingBytes", totalPendingAtDnSide);
+    stageItems.put("OM", omMap);
+    stageItems.put("SCM", scmMap);
+    stageItems.put("DN", dnMap);
+    return new DeletionPendingBytesByStage(totalPending, stageItems);
+  }
+
+  private void initializeBlockDeletionMetricsMap() {
+    nodeManager.getNodeStats().keySet().forEach(nodeId -> {
+      try {
+        long dnPending = ReconUtils.getMetricsFromDatanode(nodeId,
+                "HddsDatanode",
+                "BlockDeletingService",
+                "TotalPendingBlockBytes");
+        blockDeletionMetricsMap.put(nodeId, dnPending);

Review Comment:
   Sequential HTTP calls with a 5-second timeout can add up to 5×N seconds of delay for N datanodes.
   Do you think parallel execution would be useful here? A sketch is below.
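   
   A minimal sketch of what I mean (untested; `executor` would be a shared bounded pool, the 30s cap is arbitrary, and this assumes `blockDeletionMetricsMap` becomes a `ConcurrentHashMap` as discussed above):
   
   ```
   private void initializeBlockDeletionMetricsMap() throws Exception {
     List<CompletableFuture<Void>> futures = nodeManager.getNodeStats().keySet().stream()
         .map(nodeId -> CompletableFuture.runAsync(() -> {
           try {
             long dnPending = ReconUtils.getMetricsFromDatanode(nodeId,
                 "HddsDatanode", "BlockDeletingService", "TotalPendingBlockBytes");
             blockDeletionMetricsMap.put(nodeId, dnPending);
           } catch (Exception e) {
             log.warn("Failed to fetch block deletion metrics from {}", nodeId, e);
             blockDeletionMetricsMap.put(nodeId, 0L);
           }
         }, executor))
         .collect(Collectors.toList());
     // Bound the overall wait so one slow DN cannot stall the whole request.
     CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
         .get(30, TimeUnit.SECONDS);
   }
   ```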



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {

Review Comment:
   Make this an `@AdminOnly` endpoint so it cannot be accessed insecurely.
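   
   i.e., the same annotation the other admin-only Recon endpoints use:
   
   ```
   @Path("/storageDistribution")
   @Produces("application/json")
   @AdminOnly
   public class StorageDistributionEndpoint {
   ```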



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
