devmadhuu commented on code in PR #8995:
URL: https://github.com/apache/ozone/pull/8995#discussion_r2332026974
##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java:
##########
@@ -875,4 +886,105 @@ public static String constructObjectPathWithPrefix(long... ids) {
}
return pathBuilder.toString();
}
+
+ private static HttpURLConnection makeHttpGetCall(String urlString) throws IOException {
+ Objects.requireNonNull(urlString, "urlString");
+ URL url = new URL(urlString);
+ final HttpURLConnection conn = openURLConnection(url);
+ conn.setRequestMethod("GET");
+ conn.setConnectTimeout(HTTP_TIMEOUT_MS);
+ conn.setReadTimeout(HTTP_TIMEOUT_MS);
+ conn.setRequestProperty("Accept", "application/json");
+ return conn;
+ }
+
+ private static HttpURLConnection openURLConnection(URL url) throws IOException {
+ final String protocol = url.getProtocol().toLowerCase(Locale.ROOT);
+ switch (protocol) {
+ case "https":
+ return (HttpsURLConnection) url.openConnection();
+ case "http":
+ return (HttpURLConnection) url.openConnection();
+ default:
+ throw new IOException("Unsupported protocol: " + protocol + " for URL: " + url);
+ }
+ }
+
+ public static long getMetricsFromDatanode(DatanodeDetails datanode, String service, String name, String keyName)
+ throws IOException {
+ // Construct metrics URL for DataNode JMX endpoint
+ String metricsUrl = String.format("http://%s:%d/jmx?qry=Hadoop:service=%s,name=%s",
+ datanode.getIpAddress(),
+ datanode.getPort(DatanodeDetails.Port.Name.HTTP).getValue(),
+ service,
+ name);
+
+ HttpURLConnection conn = makeHttpGetCall(metricsUrl);
+ try {
+ String jsonResponse = getResponseData(conn);
+ return parseMetrics(jsonResponse, name, keyName);
+ } finally {
+ try {
+ conn.disconnect();
+ } catch (Exception ignored) {
+ // no-op
+ }
+ }
+ }
+
+ private static String getResponseData(HttpURLConnection conn) throws IOException {
+ int code = conn.getResponseCode();
+ // 2xx: read normal body
+ if (code >= 200 && code < 300) {
+ return readStream(conn.getInputStream());
+ }
+ String err = null;
+ try {
+ if (conn.getErrorStream() != null) {
+ err = readStream(conn.getErrorStream());
+ }
+ } catch (IOException ignored) {
+ // ignore read errors on error stream
+ }
+ log.warn("HTTP {} from {}. Error body: {}", code, conn.getURL(), err);
+ return "";
+ }
+
+ /** Small utility to read an entire stream as a UTF-8 String. */
+ private static String readStream(java.io.InputStream in) throws IOException {
+ StringBuilder sb = new StringBuilder();
+ try (BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
+ String line;
+ while ((line = br.readLine()) != null) {
+ sb.append(line).append('\n');
+ }
+ }
+ return sb.toString();
+ }
+
+ private static long parseMetrics(String jsonResponse, String serviceName, String keyName) {
+ if (jsonResponse == null || jsonResponse.isEmpty()) {
Review Comment:
In the error case, and when handling empty responses, this returns 0, which is also a valid metric value. How are we differentiating an error from a valid value? Shouldn't we return -1 instead?
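For example, a guard like this (just a sketch -- the actual JSON parsing is elided here, and the -1 sentinel is illustrative) would let callers tell a failed scrape apart from a genuine zero:

```java
private static long parseMetrics(String jsonResponse, String serviceName, String keyName) {
  if (jsonResponse == null || jsonResponse.isEmpty()) {
    // Sentinel: the HTTP call failed or returned no body.
    return -1L;
  }
  // ... existing JMX JSON parsing of serviceName/keyName goes here ...
  // If the requested key is absent, fall back to the same sentinel.
  return -1L;
}
```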
##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java:
##########
@@ -875,4 +886,105 @@ public static String constructObjectPathWithPrefix(long... ids) {
}
return pathBuilder.toString();
}
+
+ private static HttpURLConnection makeHttpGetCall(String urlString) throws IOException {
+ Objects.requireNonNull(urlString, "urlString");
+ URL url = new URL(urlString);
+ final HttpURLConnection conn = openURLConnection(url);
+ conn.setRequestMethod("GET");
+ conn.setConnectTimeout(HTTP_TIMEOUT_MS);
+ conn.setReadTimeout(HTTP_TIMEOUT_MS);
+ conn.setRequestProperty("Accept", "application/json");
+ return conn;
+ }
+
+ private static HttpURLConnection openURLConnection(URL url) throws IOException {
+ final String protocol = url.getProtocol().toLowerCase(Locale.ROOT);
+ switch (protocol) {
+ case "https":
+ return (HttpsURLConnection) url.openConnection();
+ case "http":
+ return (HttpURLConnection) url.openConnection();
+ default:
+ throw new IOException("Unsupported protocol: " + protocol + " for URL: " + url);
+ }
+ }
+
+ public static long getMetricsFromDatanode(DatanodeDetails datanode, String service, String name, String keyName)
+ throws IOException {
+ // Construct metrics URL for DataNode JMX endpoint
+ String metricsUrl = String.format("http://%s:%d/jmx?qry=Hadoop:service=%s,name=%s",
+ datanode.getIpAddress(),
+ datanode.getPort(DatanodeDetails.Port.Name.HTTP).getValue(),
+ service,
+ name);
+
+ HttpURLConnection conn = makeHttpGetCall(metricsUrl);
+ try {
+ String jsonResponse = getResponseData(conn);
+ return parseMetrics(jsonResponse, name, keyName);
+ } finally {
+ try {
+ conn.disconnect();
+ } catch (Exception ignored) {
+ // no-op
+ }
+ }
+ }
+
+ private static String getResponseData(HttpURLConnection conn) throws IOException {
+ int code = conn.getResponseCode();
+ // 2xx: read normal body
+ if (code >= 200 && code < 300) {
+ return readStream(conn.getInputStream());
+ }
+ String err = null;
+ try {
+ if (conn.getErrorStream() != null) {
+ err = readStream(conn.getErrorStream());
+ }
+ } catch (IOException ignored) {
+ // ignore read errors on error stream
Review Comment:
The error should be logged here rather than silently ignored.
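For example (a minimal sketch of the same catch block):

```java
String err = null;
try {
  if (conn.getErrorStream() != null) {
    err = readStream(conn.getErrorStream());
  }
} catch (IOException ioe) {
  // Surface the failure instead of swallowing it silently.
  log.warn("Failed to read error stream from {}", conn.getURL(), ioe);
}
```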
##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+ private final ReconNodeManager nodeManager;
+ private final OMDBInsightEndpoint omdbInsightEndpoint;
+ private final NSSummaryEndpoint nsSummaryEndpoint;
+ private final StorageContainerLocationProtocol scmClient;
+ private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+ private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();
+ private GlobalStatsDao globalStatsDao;
+
+ @Inject
+ public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
+ OMDBInsightEndpoint omDbInsightEndpoint,
+ NSSummaryEndpoint nsSummaryEndpoint,
+ GlobalStatsDao globalStatsDao,
+ StorageContainerLocationProtocol scmClient) {
+ this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
+ this.omdbInsightEndpoint = omDbInsightEndpoint;
+ this.nsSummaryEndpoint = nsSummaryEndpoint;
+ this.scmClient = scmClient;
+ this.globalStatsDao = globalStatsDao;
+ }
+
+ @GET
+ public Response getStorageDistribution() {
+ try {
+ initializeBlockDeletionMetricsMap();
+ List<DatanodeStorageReport> nodeStorageReports = collectDatanodeReports();
+ GlobalStorageReport globalStorageReport = calculateGlobalStorageReport();
+
+ Map<String, Long> namespaceMetrics = new HashMap<>();
+ try {
+ namespaceMetrics = calculateNamespaceMetrics();
+ } catch (Exception e) {
+ log.error("Error calculating namespace metrics", e);
+ // Initialize with default values
+ namespaceMetrics.put("totalUsedNamespace", 0L);
+ namespaceMetrics.put("totalOpenKeySize", 0L);
+ namespaceMetrics.put("totalCommittedSize", 0L);
+ namespaceMetrics.put("pendingDirectorySize", 0L);
+ namespaceMetrics.put("pendingKeySize", 0L);
+ namespaceMetrics.put("totalKeys", 0L);
+ }
+
+ StorageCapacityDistributionResponse response = buildStorageDistributionResponse(
+ nodeStorageReports, globalStorageReport, namespaceMetrics);
+ return Response.ok(response).build();
+ } catch (Exception e) {
+ log.error("Error getting storage distribution", e);
+ return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+ .entity("Error retrieving storage distribution: " + e.getMessage())
+ .build();
+ }
+ }
+
+ private GlobalStorageReport calculateGlobalStorageReport() {
+ try {
+ SCMNodeStat stats = nodeManager.getStats();
+ if (stats == null) {
+ log.warn("Node manager stats are null, returning default values");
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+
+ long scmUsed = stats.getScmUsed() != null ? stats.getScmUsed().get() : 0L;
+ long remaining = stats.getRemaining() != null ? stats.getRemaining().get() : 0L;
+ long capacity = stats.getCapacity() != null ? stats.getCapacity().get() : 0L;
+
+ return new GlobalStorageReport(scmUsed, remaining, capacity);
+ } catch (Exception e) {
+ log.error("Error calculating global storage report", e);
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+ }
+
+ private Map<String, Long> calculateNamespaceMetrics() {
+ Map<String, Long> metrics = new HashMap<>();
+ Map<String, Long> totalPendingAtOmSide = calculatePendingSizes();
+ long totalOpenKeySize = calculateOpenKeySizes();
+ long totalCommittedSize = calculateCommittedSize();
+ long pendingDirectorySize = totalPendingAtOmSide.getOrDefault("pendingDirectorySize", 0L);
+ long pendingKeySize = totalPendingAtOmSide.getOrDefault("pendingKeySize", 0L);
+ long totalUsedNamespace = pendingDirectorySize + pendingKeySize + totalOpenKeySize + totalCommittedSize;
+
+ long totalKeys = 0L;
+ // Keys from OBJECT_STORE buckets.
+ GlobalStats keyRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(KEY_TABLE));
+ // Keys from FILE_SYSTEM_OPTIMIZED buckets
+ GlobalStats fileRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(FILE_TABLE));
+ if (keyRecord != null) {
+ totalKeys += keyRecord.getValue();
+ }
+ if (fileRecord != null) {
+ totalKeys += fileRecord.getValue();
+ }
+
+ metrics.put("pendingDirectorySize", pendingDirectorySize);
+ metrics.put("pendingKeySize", pendingKeySize);
+ metrics.put("totalOpenKeySize", totalOpenKeySize);
+ metrics.put("totalCommittedSize", totalCommittedSize);
+ metrics.put("totalUsedNamespace", totalUsedNamespace);
+ metrics.put("totalKeys", totalKeys);
+ return metrics;
+ }
+
+ private StorageCapacityDistributionResponse buildStorageDistributionResponse(
+ List<DatanodeStorageReport> nodeStorageReports,
+ GlobalStorageReport storageMetrics,
+ Map<String, Long> namespaceMetrics) {
+ DeletedBlocksTransactionSummary scmSummary = null;
+ try {
+ scmSummary = scmClient.getDeletedBlockSummary();
+ } catch (IOException e) {
+ log.warn("Failed to get deleted block summary from SCM", e);
+ }
+
+ long totalPendingAtDnSide = 0L;
+ try {
+ totalPendingAtDnSide = blockDeletionMetricsMap.values().stream().reduce(0L, Long::sum);
+ } catch (Exception e) {
+ log.warn("Error calculating pending deletion metrics", e);
+ }
+
+ DeletionPendingBytesByStage deletionPendingBytesByStage =
+ createDeletionPendingBytesByStage(
+ namespaceMetrics.getOrDefault("pendingDirectorySize", 0L),
+ namespaceMetrics.getOrDefault("pendingKeySize", 0L),
+ scmSummary != null ? scmSummary.getTotalBlockReplicatedSize() : 0L,
+ totalPendingAtDnSide);
+
+ // Safely get values from namespaceMetrics with null checks
+ Long totalUsedNamespace = namespaceMetrics.get("totalUsedNamespace");
+ Long totalOpenKeySize = namespaceMetrics.get("totalOpenKeySize");
+ Long totalCommittedSize = namespaceMetrics.get("totalCommittedSize");
+ Long totalKeys = namespaceMetrics.get("totalKeys");
+
+ return StorageCapacityDistributionResponse.newBuilder()
+ .setDataNodeUsage(nodeStorageReports)
+ .setGlobalStorage(storageMetrics)
+ .setGlobalNamespace(new GlobalNamespaceReport(
+ totalUsedNamespace != null ? totalUsedNamespace : 0L,
+ totalKeys != null ? totalKeys : 0L))
+ .setUsedSpaceBreakDown(new UsedSpaceBreakDown(
+ totalOpenKeySize != null ? totalOpenKeySize : 0L,
+ totalCommittedSize != null ? totalCommittedSize : 0L,
+ deletionPendingBytesByStage))
+ .build();
+ }
+
+ private List<DatanodeStorageReport> collectDatanodeReports() {
+ return nodeManager.getAllNodes().stream()
+ .map(this::getStorageReport)
+ .filter(report -> report != null) // Filter out null reports
+ .collect(Collectors.toList());
+ }
+
+ private Map<String, Long> calculatePendingSizes() {
+ Map<String, Long> result = new HashMap<>();
+ Map<String, Long> pendingDeletedDirSizes = new HashMap<>();
+ omdbInsightEndpoint.calculateTotalPendingDeletedDirSizes(pendingDeletedDirSizes);
+ Map<String, Long> pendingKeySize = new HashMap<>();
+ omdbInsightEndpoint.createKeysSummaryForDeletedKey(pendingKeySize);
+ result.put("pendingDirectorySize",
pendingDeletedDirSizes.getOrDefault("totalReplicatedDataSize", 0L));
+ result.put("pendingKeySize",
pendingKeySize.getOrDefault("totalReplicatedDataSize", 0L));
+ return result;
+ }
+
+ private long calculateOpenKeySizes() {
+ Map<String, Long> openKeySummary = new HashMap<>();
+ omdbInsightEndpoint.createKeysSummaryForOpenKey(openKeySummary);
+ Map<String, Long> openKeyMPUSummary = new HashMap<>();
+ omdbInsightEndpoint.createKeysSummaryForOpenMPUKey(openKeyMPUSummary);
+ long openKeyDataSize = openKeySummary.getOrDefault("totalReplicatedDataSize", 0L);
+ long totalMPUKeySize = openKeyMPUSummary.getOrDefault("totalReplicatedDataSize", 0L);
+ return openKeyDataSize + totalMPUKeySize;
+ }
+
+ private long calculateCommittedSize() {
+ try {
+ Response rootResponse = nsSummaryEndpoint.getDiskUsage("/", false, true, false);
+ if (rootResponse.getStatus() != Response.Status.OK.getStatusCode()) {
+ log.warn("Failed to get disk usage, status: {}",
rootResponse.getStatus());
+ return 0L;
+ }
+ DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
+ return duRootRes != null ? duRootRes.getSizeWithReplica() : 0L;
+ } catch (IOException e) {
+ log.error("IOException while calculating committed size", e);
+ return 0L;
+ }
+ }
+
+ private DeletionPendingBytesByStage createDeletionPendingBytesByStage(long pendingDirectorySize,
+ long pendingKeySize, long totalPendingAtScmSide, long totalPendingAtDnSide) {
+ long totalPending = pendingDirectorySize + pendingKeySize + totalPendingAtScmSide + totalPendingAtDnSide;
+ Map<String, Map<String, Long>> stageItems = new HashMap<>();
+ Map<String, Long> omMap = new HashMap<>();
+ omMap.put("totalBytes", pendingDirectorySize + pendingKeySize);
+ omMap.put("pendingDirectoryBytes", pendingDirectorySize);
+ omMap.put("pendingKeyBytes", pendingKeySize);
+ Map<String, Long> scmMap = new HashMap<>();
+ scmMap.put("pendingBytes", totalPendingAtScmSide);
+ Map<String, Long> dnMap = new HashMap<>();
+ dnMap.put("pendingBytes", totalPendingAtDnSide);
+ stageItems.put("OM", omMap);
+ stageItems.put("SCM", scmMap);
+ stageItems.put("DN", dnMap);
+ return new DeletionPendingBytesByStage(totalPending, stageItems);
+ }
+
+ private void initializeBlockDeletionMetricsMap() {
+ nodeManager.getNodeStats().keySet().forEach(nodeId -> {
+ try {
+ long dnPending = ReconUtils.getMetricsFromDatanode(nodeId,
+ "HddsDatanode",
+ "BlockDeletingService",
+ "TotalPendingBlockBytes");
+ blockDeletionMetricsMap.put(nodeId, dnPending);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
Review Comment:
We don't need to catch the exception here; in fact, the try/catch can be removed entirely, since you are already handling it in the caller. Simply catching and then rethrowing is redundant.
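One way this could look (a sketch; since a forEach lambda cannot throw the checked IOException, a plain loop lets it propagate to the existing handler in getStorageDistribution):

```java
private void initializeBlockDeletionMetricsMap() throws IOException {
  for (DatanodeDetails nodeId : nodeManager.getNodeStats().keySet()) {
    long dnPending = ReconUtils.getMetricsFromDatanode(nodeId,
        "HddsDatanode",
        "BlockDeletingService",
        "TotalPendingBlockBytes");
    blockDeletionMetricsMap.put(nodeId, dnPending);
  }
}
```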
##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+ private final ReconNodeManager nodeManager;
+ private final OMDBInsightEndpoint omdbInsightEndpoint;
+ private final NSSummaryEndpoint nsSummaryEndpoint;
+ private final StorageContainerLocationProtocol scmClient;
+ private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+ private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();
+ private GlobalStatsDao globalStatsDao;
+
+ @Inject
+ public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
+ OMDBInsightEndpoint omDbInsightEndpoint,
+ NSSummaryEndpoint nsSummaryEndpoint,
+ GlobalStatsDao globalStatsDao,
+ StorageContainerLocationProtocol scmClient) {
+ this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
+ this.omdbInsightEndpoint = omDbInsightEndpoint;
+ this.nsSummaryEndpoint = nsSummaryEndpoint;
+ this.scmClient = scmClient;
+ this.globalStatsDao = globalStatsDao;
+ }
+
+ @GET
+ public Response getStorageDistribution() {
+ try {
+ initializeBlockDeletionMetricsMap();
+ List<DatanodeStorageReport> nodeStorageReports = collectDatanodeReports();
+ GlobalStorageReport globalStorageReport = calculateGlobalStorageReport();
+
+ Map<String, Long> namespaceMetrics = new HashMap<>();
+ try {
+ namespaceMetrics = calculateNamespaceMetrics();
+ } catch (Exception e) {
+ log.error("Error calculating namespace metrics", e);
+ // Initialize with default values
+ namespaceMetrics.put("totalUsedNamespace", 0L);
+ namespaceMetrics.put("totalOpenKeySize", 0L);
+ namespaceMetrics.put("totalCommittedSize", 0L);
+ namespaceMetrics.put("pendingDirectorySize", 0L);
+ namespaceMetrics.put("pendingKeySize", 0L);
+ namespaceMetrics.put("totalKeys", 0L);
+ }
+
+ StorageCapacityDistributionResponse response = buildStorageDistributionResponse(
+ nodeStorageReports, globalStorageReport, namespaceMetrics);
+ return Response.ok(response).build();
+ } catch (Exception e) {
+ log.error("Error getting storage distribution", e);
+ return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+ .entity("Error retrieving storage distribution: " + e.getMessage())
+ .build();
+ }
+ }
+
+ private GlobalStorageReport calculateGlobalStorageReport() {
+ try {
+ SCMNodeStat stats = nodeManager.getStats();
+ if (stats == null) {
+ log.warn("Node manager stats are null, returning default values");
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+
+ long scmUsed = stats.getScmUsed() != null ? stats.getScmUsed().get() : 0L;
+ long remaining = stats.getRemaining() != null ? stats.getRemaining().get() : 0L;
+ long capacity = stats.getCapacity() != null ? stats.getCapacity().get() : 0L;
+
+ return new GlobalStorageReport(scmUsed, remaining, capacity);
+ } catch (Exception e) {
+ log.error("Error calculating global storage report", e);
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+ }
+
+ private Map<String, Long> calculateNamespaceMetrics() {
+ Map<String, Long> metrics = new HashMap<>();
+ Map<String, Long> totalPendingAtOmSide = calculatePendingSizes();
+ long totalOpenKeySize = calculateOpenKeySizes();
+ long totalCommittedSize = calculateCommittedSize();
+ long pendingDirectorySize = totalPendingAtOmSide.getOrDefault("pendingDirectorySize", 0L);
+ long pendingKeySize = totalPendingAtOmSide.getOrDefault("pendingKeySize", 0L);
+ long totalUsedNamespace = pendingDirectorySize + pendingKeySize + totalOpenKeySize + totalCommittedSize;
+
+ long totalKeys = 0L;
+ // Keys from OBJECT_STORE buckets.
+ GlobalStats keyRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(KEY_TABLE));
+ // Keys from FILE_SYSTEM_OPTIMIZED buckets
+ GlobalStats fileRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(FILE_TABLE));
+ if (keyRecord != null) {
+ totalKeys += keyRecord.getValue();
+ }
+ if (fileRecord != null) {
+ totalKeys += fileRecord.getValue();
+ }
+
+ metrics.put("pendingDirectorySize", pendingDirectorySize);
+ metrics.put("pendingKeySize", pendingKeySize);
+ metrics.put("totalOpenKeySize", totalOpenKeySize);
+ metrics.put("totalCommittedSize", totalCommittedSize);
+ metrics.put("totalUsedNamespace", totalUsedNamespace);
+ metrics.put("totalKeys", totalKeys);
+ return metrics;
+ }
+
+ private StorageCapacityDistributionResponse buildStorageDistributionResponse(
+ List<DatanodeStorageReport> nodeStorageReports,
+ GlobalStorageReport storageMetrics,
+ Map<String, Long> namespaceMetrics) {
+ DeletedBlocksTransactionSummary scmSummary = null;
+ try {
+ scmSummary = scmClient.getDeletedBlockSummary();
+ } catch (IOException e) {
+ log.warn("Failed to get deleted block summary from SCM", e);
Review Comment:
Make it log.error
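i.e. (sketch of the suggested severity change):

```java
} catch (IOException e) {
  log.error("Failed to get deleted block summary from SCM", e);
}
```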
##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java:
##########
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import javax.inject.Inject;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionSummary;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.DeletionPendingBytesByStage;
+import org.apache.hadoop.ozone.recon.api.types.GlobalNamespaceReport;
+import org.apache.hadoop.ozone.recon.api.types.GlobalStorageReport;
+import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.UsedSpaceBreakDown;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask;
+import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
+import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This endpoint handles requests related to storage distribution across
+ * different datanodes in a Recon instance. It provides detailed reports
+ * on storage capacity, utilization, and associated metrics.
+ * <p>
+ * The data is aggregated from multiple sources, including node manager
+ * statistics, and is used to construct responses with information
+ * about global storage and namespace usage, storage usage breakdown,
+ * and deletion operations in progress.
+ * <p>
+ * An instance of {@link ReconNodeManager} is used to fetch detailed
+ * node-specific statistics required for generating the report.
+ */
+@Path("/storageDistribution")
+@Produces("application/json")
+public class StorageDistributionEndpoint {
+ private final ReconNodeManager nodeManager;
+ private final OMDBInsightEndpoint omdbInsightEndpoint;
+ private final NSSummaryEndpoint nsSummaryEndpoint;
+ private final StorageContainerLocationProtocol scmClient;
+ private static Logger log = LoggerFactory.getLogger(StorageDistributionEndpoint.class);
+ private Map<DatanodeDetails, Long> blockDeletionMetricsMap = new HashMap<>();
+ private GlobalStatsDao globalStatsDao;
+
+ @Inject
+ public StorageDistributionEndpoint(OzoneStorageContainerManager reconSCM,
+ OMDBInsightEndpoint omDbInsightEndpoint,
+ NSSummaryEndpoint nsSummaryEndpoint,
+ GlobalStatsDao globalStatsDao,
+ StorageContainerLocationProtocol scmClient) {
+ this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager();
+ this.omdbInsightEndpoint = omDbInsightEndpoint;
+ this.nsSummaryEndpoint = nsSummaryEndpoint;
+ this.scmClient = scmClient;
+ this.globalStatsDao = globalStatsDao;
+ }
+
+ @GET
+ public Response getStorageDistribution() {
+ try {
+ initializeBlockDeletionMetricsMap();
+ List<DatanodeStorageReport> nodeStorageReports = collectDatanodeReports();
+ GlobalStorageReport globalStorageReport = calculateGlobalStorageReport();
+
+ Map<String, Long> namespaceMetrics = new HashMap<>();
+ try {
+ namespaceMetrics = calculateNamespaceMetrics();
+ } catch (Exception e) {
+ log.error("Error calculating namespace metrics", e);
+ // Initialize with default values
+ namespaceMetrics.put("totalUsedNamespace", 0L);
+ namespaceMetrics.put("totalOpenKeySize", 0L);
+ namespaceMetrics.put("totalCommittedSize", 0L);
+ namespaceMetrics.put("pendingDirectorySize", 0L);
+ namespaceMetrics.put("pendingKeySize", 0L);
+ namespaceMetrics.put("totalKeys", 0L);
+ }
+
+ StorageCapacityDistributionResponse response = buildStorageDistributionResponse(
+ nodeStorageReports, globalStorageReport, namespaceMetrics);
+ return Response.ok(response).build();
+ } catch (Exception e) {
+ log.error("Error getting storage distribution", e);
+ return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
+ .entity("Error retrieving storage distribution: " + e.getMessage())
+ .build();
+ }
+ }
+
+ private GlobalStorageReport calculateGlobalStorageReport() {
+ try {
+ SCMNodeStat stats = nodeManager.getStats();
+ if (stats == null) {
+ log.warn("Node manager stats are null, returning default values");
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+
+ long scmUsed = stats.getScmUsed() != null ? stats.getScmUsed().get() : 0L;
+ long remaining = stats.getRemaining() != null ? stats.getRemaining().get() : 0L;
+ long capacity = stats.getCapacity() != null ? stats.getCapacity().get() : 0L;
+
+ return new GlobalStorageReport(scmUsed, remaining, capacity);
+ } catch (Exception e) {
+ log.error("Error calculating global storage report", e);
+ return new GlobalStorageReport(0L, 0L, 0L);
+ }
+ }
+
+ private Map<String, Long> calculateNamespaceMetrics() {
+ Map<String, Long> metrics = new HashMap<>();
+ Map<String, Long> totalPendingAtOmSide = calculatePendingSizes();
+ long totalOpenKeySize = calculateOpenKeySizes();
+ long totalCommittedSize = calculateCommittedSize();
+ long pendingDirectorySize = totalPendingAtOmSide.getOrDefault("pendingDirectorySize", 0L);
+ long pendingKeySize = totalPendingAtOmSide.getOrDefault("pendingKeySize", 0L);
+ long totalUsedNamespace = pendingDirectorySize + pendingKeySize + totalOpenKeySize + totalCommittedSize;
+
+ long totalKeys = 0L;
+ // Keys from OBJECT_STORE buckets.
+ GlobalStats keyRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(KEY_TABLE));
+ // Keys from FILE_SYSTEM_OPTIMIZED buckets
+ GlobalStats fileRecord = globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(FILE_TABLE));
+ if (keyRecord != null) {
+ totalKeys += keyRecord.getValue();
+ }
+ if (fileRecord != null) {
+ totalKeys += fileRecord.getValue();
+ }
+
+ metrics.put("pendingDirectorySize", pendingDirectorySize);
+ metrics.put("pendingKeySize", pendingKeySize);
+ metrics.put("totalOpenKeySize", totalOpenKeySize);
+ metrics.put("totalCommittedSize", totalCommittedSize);
+ metrics.put("totalUsedNamespace", totalUsedNamespace);
+ metrics.put("totalKeys", totalKeys);
+ return metrics;
+ }
+
+ private StorageCapacityDistributionResponse buildStorageDistributionResponse(
+ List<DatanodeStorageReport> nodeStorageReports,
+ GlobalStorageReport storageMetrics,
+ Map<String, Long> namespaceMetrics) {
+ DeletedBlocksTransactionSummary scmSummary = null;
+ try {
+ scmSummary = scmClient.getDeletedBlockSummary();
+ } catch (IOException e) {
+ log.warn("Failed to get deleted block summary from SCM", e);
+ }
+
+ long totalPendingAtDnSide = 0L;
+ try {
+ totalPendingAtDnSide = blockDeletionMetricsMap.values().stream().reduce(0L, Long::sum);
+ } catch (Exception e) {
+ log.warn("Error calculating pending deletion metrics", e);
Review Comment:
Make it log.error
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]