Copilot commented on code in PR #12563:
URL: https://github.com/apache/cloudstack/pull/12563#discussion_r2841482124


##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java:
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.host.HostVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class UnifiedNASStrategy extends NASStrategy {
+
+    private static final Logger s_logger = 
LogManager.getLogger(UnifiedNASStrategy.class);
+    private final FeignClientFactory feignClientFactory;
+    private final NASFeignClient nasFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final JobFeignClient jobFeignClient;
+    @Inject private VolumeDao volumeDao;
+    @Inject private EndPointSelector epSelector;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    public UnifiedNASStrategy(OntapStorage ontapStorage) {
+        super(ontapStorage);
+        String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF();
+        this.feignClientFactory = new FeignClientFactory();
+        this.nasFeignClient = 
feignClientFactory.createClient(NASFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL );
+    }
+
    /**
     * Replaces the ONTAP storage configuration backing this strategy.
     * NOTE(review): this does not rebuild the Feign clients, which stay bound
     * to the management LIF given at construction time — confirm that is
     * intended before changing the management LIF through this setter.
     */
    public void setOntapStorage(OntapStorage ontapStorage) {
        this.storage = ontapStorage;
    }
+
    /**
     * Creates a CloudStack volume on the ONTAP backend.
     * TODO: not yet implemented — currently always returns {@code null}.
     */
    @Override
    public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
        return null;
    }
+
    /**
     * Updates an existing CloudStack volume on the ONTAP backend.
     * TODO: not yet implemented — currently always returns {@code null}.
     */
    @Override
    CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) {
        return null;
    }
+
    /**
     * Deletes a CloudStack volume from the ONTAP backend.
     * TODO: not yet implemented — currently a no-op.
     */
    @Override
    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
    }
+
    /**
     * Copies a CloudStack volume on the ONTAP backend.
     * TODO: not yet implemented — currently a no-op.
     */
    @Override
    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {

    }
+
    /**
     * Looks up a CloudStack volume by the given attribute map.
     * TODO: not yet implemented — currently always returns {@code null}.
     */
    @Override
    public CloudStackVolume getCloudStackVolume(Map<String, String> cloudStackVolumeMap) {
        return null;
    }
+
+    @Override
+    public AccessGroup createAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("createAccessGroup: Create access group {}: " , 
accessGroup);
+        Map<String, String> details = 
accessGroup.getPrimaryDataStoreInfo().getDetails();
+        String svmName = details.get(Constants.SVM_NAME);
+        String volumeUUID = details.get(Constants.VOLUME_UUID);
+        String volumeName = details.get(Constants.VOLUME_NAME);
+
+        ExportPolicy policyRequest = 
createExportPolicyRequest(accessGroup,svmName,volumeName);
+        try {
+            ExportPolicy createdPolicy = createExportPolicy(svmName, 
policyRequest);
+            s_logger.info("ExportPolicy created: {}, now attaching this policy 
to storage pool volume", createdPolicy.getName());
+            assignExportPolicyToVolume(volumeUUID,createdPolicy.getName());
+            
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), 
Constants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true);
+            
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), 
Constants.EXPORT_POLICY_NAME, createdPolicy.getName(), true);
+            s_logger.info("Successfully assigned exportPolicy {} to volume 
{}", policyRequest.getName(), volumeName);
+            accessGroup.setPolicy(policyRequest);
+            return accessGroup;
+        }catch(Exception e){
+            s_logger.error("Exception occurred while creating access group: " 
+  e);
+            throw new CloudRuntimeException("Failed to create access group: " 
+ e);
+        }
+    }
+
+    @Override
+    public void deleteAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("deleteAccessGroup: Deleting export policy");
+
+        if (accessGroup == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: Invalid 
accessGroup object - accessGroup is null");
+        }
+
+        PrimaryDataStoreInfo primaryDataStoreInfo = 
accessGroup.getPrimaryDataStoreInfo();
+        if (primaryDataStoreInfo == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: 
PrimaryDataStoreInfo is null in accessGroup");
+        }
+        s_logger.info("deleteAccessGroup: Deleting export policy for the 
storage pool {}", primaryDataStoreInfo.getName());
+        try {
+            String authHeader = 
Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            String svmName = storage.getSvmName();
+            String exportPolicyName = 
primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_NAME);
+            String exportPolicyId = 
primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_ID);
+
+            try {
+                
nasFeignClient.deleteExportPolicyById(authHeader,exportPolicyId);
+                s_logger.info("deleteAccessGroup: Successfully deleted export 
policy '{}'", exportPolicyName);

Review Comment:
   `exportPolicyId` is read from `primaryDataStoreInfo.getDetails()` and used 
directly in `deleteExportPolicyById` without validating it. If the detail is 
missing/empty (or details weren’t loaded), this will result in an invalid ONTAP 
call and a hard failure. Please validate `exportPolicyId` (and/or fall back to 
lookup by `exportPolicyName`) before attempting deletion, and handle the 
"already deleted" case gracefully.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java:
##########
@@ -0,0 +1,527 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends 
BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+    @Inject private ClusterDao _clusterDao;
+    @Inject private StorageManager _storageMgr;
+    @Inject private ResourceManager _resourceMgr;
+    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+    @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+    @Inject private StoragePoolAutomation _storagePoolAutomation;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+    private static final Logger s_logger = 
LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+    private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+    @Override
+    public DataStore initialize(Map<String, Object> dsInfos) {
+        if (dsInfos == null) {
+            throw new CloudRuntimeException("Datastore info map is null, 
cannot create primary storage");
+        }
+        String url = (String) dsInfos.get("url");
+        Long zoneId = (Long) dsInfos.get("zoneId");
+        Long podId = (Long) dsInfos.get("podId");
+        Long clusterId = (Long) dsInfos.get("clusterId");
+        String storagePoolName = (String) dsInfos.get("name");
+        String providerName = (String) dsInfos.get("providerName");
+        Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+        boolean managed = (boolean) dsInfos.get("managed");
+        String tags = (String) dsInfos.get("tags");
+        Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
+
+        s_logger.info("Creating ONTAP primary storage pool with name: " + 
storagePoolName + ", provider: " + providerName +
+                ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " 
+ clusterId);
+        s_logger.debug("Received capacityBytes from UI: " + capacityBytes);
+
+        @SuppressWarnings("unchecked")
+        Map<String, String> details = (Map<String, String>) 
dsInfos.get("details");
+
+        if (capacityBytes == null || capacityBytes <= 0) {
+            s_logger.warn("capacityBytes not provided or invalid (" + 
capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE);
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        } else if (capacityBytes < ONTAP_MIN_VOLUME_SIZE) {
+            s_logger.warn("capacityBytes (" + capacityBytes + ") is below 
ONTAP minimum (" + ONTAP_MIN_VOLUME_SIZE + "), adjusting to minimum");
+            capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+        }
+
+        if (podId == null ^ clusterId == null) {
+            throw new CloudRuntimeException("Cluster Id or Pod Id is null, 
cannot create primary storage");
+        }
+
+        if (podId == null && clusterId == null) {
+            if (zoneId != null) {
+                s_logger.info("Both Pod Id and Cluster Id are null, Primary 
storage pool will be associated with a Zone");
+            } else {
+                throw new CloudRuntimeException("Pod Id, Cluster Id and Zone 
Id are all null, cannot create primary storage");
+            }
+        }
+
+        if (storagePoolName == null || storagePoolName.isEmpty()) {
+            throw new CloudRuntimeException("Storage pool name is null or 
empty, cannot create primary storage");
+        }
+
+        if (providerName == null || providerName.isEmpty()) {
+            throw new CloudRuntimeException("Provider name is null or empty, 
cannot create primary storage");
+        }
+
+        PrimaryDataStoreParameters parameters = new 
PrimaryDataStoreParameters();
+        if (clusterId != null) {
+            ClusterVO clusterVO = _clusterDao.findById(clusterId);
+            Preconditions.checkNotNull(clusterVO, "Unable to locate the 
specified cluster");
+            if (clusterVO.getHypervisorType() != 
Hypervisor.HypervisorType.KVM) {
+                throw new CloudRuntimeException("ONTAP primary storage is 
supported only for KVM hypervisor");
+            }
+            parameters.setHypervisorType(clusterVO.getHypervisorType());
+        }
+
+        s_logger.debug("ONTAP primary storage will be created as " + (managed 
? "managed" : "unmanaged"));
+        if (!managed) {
+            throw new CloudRuntimeException("ONTAP primary storage must be 
managed");
+        }
+
+        Set<String> requiredKeys = Set.of(
+                Constants.USERNAME,
+                Constants.PASSWORD,
+                Constants.SVM_NAME,
+                Constants.PROTOCOL,
+                Constants.MANAGEMENT_LIF
+        );
+
+        Set<String> optionalKeys = Set.of(
+                Constants.IS_DISAGGREGATED
+        );
+
+        Set<String> allowedKeys = new java.util.HashSet<>(requiredKeys);
+        allowedKeys.addAll(optionalKeys);
+
+        if (url != null && !url.isEmpty()) {
+            for (String segment : url.split(Constants.SEMICOLON)) {
+                if (segment.isEmpty()) {
+                    continue;
+                }
+                String[] kv = segment.split(Constants.EQUALS, 2);
+                if (kv.length == 2) {
+                    details.put(kv[0].trim(), kv[1].trim());
+                }
+            }
+        }
+
+        for (Map.Entry<String, String> e : details.entrySet()) {
+            String key = e.getKey();
+            String val = e.getValue();
+            if (!allowedKeys.contains(key)) {
+                throw new CloudRuntimeException("Unexpected ONTAP detail key 
in URL: " + key);
+            }
+            if (val == null || val.isEmpty()) {
+                throw new CloudRuntimeException("ONTAP primary storage 
creation failed, empty detail: " + key);
+            }
+        }
+
+        Set<String> providedKeys = new java.util.HashSet<>(details.keySet());
+        if (!providedKeys.containsAll(requiredKeys)) {
+            Set<String> missing = new java.util.HashSet<>(requiredKeys);
+            missing.removeAll(providedKeys);
+            throw new CloudRuntimeException("ONTAP primary storage creation 
failed, missing detail(s): " + missing);
+        }
+
+        details.put(Constants.SIZE, capacityBytes.toString());
+
+        details.putIfAbsent(Constants.IS_DISAGGREGATED, "false");
+
+        ProtocolType protocol = 
ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+
+        long volumeSize = Long.parseLong(details.get(Constants.SIZE));
+        OntapStorage ontapStorage = new OntapStorage(
+                details.get(Constants.USERNAME),
+                details.get(Constants.PASSWORD),
+                details.get(Constants.MANAGEMENT_LIF),
+                details.get(Constants.SVM_NAME),
+                volumeSize,
+                protocol,
+                
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED).toLowerCase()));
+
+        StorageStrategy storageStrategy = 
StorageProviderFactory.getStrategy(ontapStorage);
+        boolean isValid = storageStrategy.connect();
+        if (isValid) {
+            String dataLIF = storageStrategy.getNetworkInterface();
+            if (dataLIF == null || dataLIF.isEmpty()) {
+                throw new CloudRuntimeException("Failed to retrieve Data LIF 
from ONTAP, cannot create primary storage");
+            }
+            s_logger.info("Using Data LIF for storage access: " + dataLIF);
+            details.put(Constants.DATA_LIF, dataLIF);
+            s_logger.info("Creating ONTAP volume '" + storagePoolName + "' 
with size: " + volumeSize + " bytes (" +
+                    (volumeSize / (1024 * 1024 * 1024)) + " GB)");
+            try {
+                Volume volume = 
storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+                if (volume == null) {
+                    s_logger.error("createStorageVolume returned null for 
volume: " + storagePoolName);
+                    throw new CloudRuntimeException("Failed to create ONTAP 
volume: " + storagePoolName);
+                }
+                s_logger.info("Volume object retrieved successfully. UUID: " + 
volume.getUuid() + ", Name: " + volume.getName());
+                details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid());
+                details.putIfAbsent(Constants.VOLUME_NAME, volume.getName());
+            } catch (Exception e) {
+                s_logger.error("Exception occurred while creating ONTAP 
volume: " + storagePoolName, e);
+                throw new CloudRuntimeException("Failed to create ONTAP 
volume: " + storagePoolName + ". Error: " + e.getMessage(), e);
+            }
+        } else {
+            throw new CloudRuntimeException("ONTAP details validation failed, 
cannot create primary storage");
+        }
+
+        String path;
+        int port;
+        switch (protocol) {
+            case NFS3:
+                parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
+                path = Constants.SLASH + storagePoolName;
+                port = Constants.NFS3_PORT;
+                s_logger.info("Setting NFS path for storage pool: " + path + 
", port: " + port);
+                break;
+            case ISCSI:
+                parameters.setType(Storage.StoragePoolType.Iscsi);
+                path = storageStrategy.getStoragePath();
+                port = Constants.ISCSI_PORT;
+                s_logger.info("Setting iSCSI path for storage pool: " + path + 
", port: " + port);
+                break;
+            default:
+                throw new CloudRuntimeException("Unsupported protocol: " + 
protocol + ", cannot create primary storage");
+        }
+
+        parameters.setHost(details.get(Constants.DATA_LIF));
+        parameters.setPort(port);
+        parameters.setPath(path);
+        parameters.setTags(tags);
+        parameters.setIsTagARule(isTagARule);
+        parameters.setDetails(details);
+        parameters.setUuid(UUID.randomUUID().toString());
+        parameters.setZoneId(zoneId);
+        parameters.setPodId(podId);
+        parameters.setClusterId(clusterId);
+        parameters.setName(storagePoolName);
+        parameters.setProviderName(providerName);
+        parameters.setManaged(managed);
+        parameters.setCapacityBytes(capacityBytes);
+        parameters.setUsedBytes(0);
+
+        return _dataStoreHelper.createPrimaryDataStore(parameters);
+    }
+
+    @Override
+    public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
+        logger.debug("In attachCluster for ONTAP primary storage");
+        if (dataStore == null) {
+            throw new InvalidParameterValueException("attachCluster: dataStore 
should not be null");
+        }
+        if (scope == null) {
+            throw new InvalidParameterValueException("attachCluster: scope 
should not be null");
+        }
+        List<String> hostsIdentifier = new ArrayList<>();
+        StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+        if (storagePool == null) {
+            s_logger.error("attachCluster : Storage Pool not found for id: " + 
dataStore.getId());
+            throw new CloudRuntimeException("attachCluster : Storage Pool not 
found for id: " + dataStore.getId());
+        }
+        PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+        List<HostVO> hostsToConnect = 
_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
+        logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in 
cluster {}", hostsToConnect, primaryStore.getClusterId());
+
+        Map<String, String> details = 
storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+        StorageStrategy strategy = 
Utility.getStrategyByStoragePoolDetails(details);
+
+        ProtocolType protocol = 
ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+        if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, 
protocol, hostsIdentifier)) {
+            String errMsg = "attachCluster: Not all hosts in the cluster 
support the protocol: " + protocol.name();
+            s_logger.error(errMsg);
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        logger.debug("attachCluster: Attaching the pool to each of the host in 
the cluster: {}", primaryStore.getClusterId());
+        if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
+            try {
+                AccessGroup accessGroupRequest = new AccessGroup();
+                accessGroupRequest.setHostsToConnect(hostsToConnect);
+                accessGroupRequest.setScope(scope);
+                primaryStore.setDetails(details);
+                accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+                strategy.createAccessGroup(accessGroupRequest);
+            } catch (Exception e) {
+                s_logger.error("attachCluster: Failed to create access group 
on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: 
" + e.getMessage());
+                throw new CloudRuntimeException("attachCluster: Failed to 
create access group on storage system for cluster: " + 
primaryStore.getClusterId() + ". Exception: " + e.getMessage());
+            }
+        }
+        logger.debug("attachCluster: Attaching the pool to each of the host in 
the cluster: {}", primaryStore.getClusterId());
+        for (HostVO host : hostsToConnect) {
+            try {
+                _storageMgr.connectHostToSharedPool(host, dataStore.getId());
+            } catch (Exception e) {
+                logger.warn("attachCluster: Unable to establish a connection 
between " + host + " and " + dataStore, e);
+                return false;
+            }
+        }
+        _dataStoreHelper.attachCluster(dataStore);
+        return true;
+    }
+
    /**
     * Host-scoped attach is not supported for ONTAP primary storage; pools are
     * attached at cluster or zone scope instead. Always returns {@code false}.
     */
    @Override
    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
        return false;
    }
+
+    @Override
+    public boolean attachZone(DataStore dataStore, ZoneScope scope, 
Hypervisor.HypervisorType hypervisorType) {
+        logger.debug("In attachZone for ONTAP primary storage");
+        if (dataStore == null) {
+            throw new InvalidParameterValueException("attachZone: dataStore 
should not be null");
+        }
+        if (scope == null) {
+            throw new InvalidParameterValueException("attachZone: scope should 
not be null");
+        }
+        List<String> hostsIdentifier = new ArrayList<>();
+        StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+        if (storagePool == null) {
+            s_logger.error("attachZone : Storage Pool not found for id: " + 
dataStore.getId());
+            throw new CloudRuntimeException("attachZone : Storage Pool not 
found for id: " + dataStore.getId());
+        }
+
+        PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+        List<HostVO> hostsToConnect = 
_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, 
scope.getScopeId(), Hypervisor.HypervisorType.KVM);
+        logger.debug(String.format("In createPool. Attaching the pool to each 
of the hosts in %s.", hostsToConnect));
+
+        Map<String, String> details = 
storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+        StorageStrategy strategy = 
Utility.getStrategyByStoragePoolDetails(details);
+
+        logger.debug("attachZone: Eligible Up and Enabled hosts: {}", 
hostsToConnect);
+        ProtocolType protocol = 
ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+        if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, 
protocol, hostsIdentifier)) {
+            String errMsg = "attachZone: Not all hosts in the zone support the 
protocol: " + protocol.name();
+            s_logger.error(errMsg);
+            throw new CloudRuntimeException(errMsg);
+        }
+        if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) {
+            try {
+                AccessGroup accessGroupRequest = new AccessGroup();
+                accessGroupRequest.setHostsToConnect(hostsToConnect);
+                accessGroupRequest.setScope(scope);
+                primaryStore.setDetails(details);
+                accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+                strategy.createAccessGroup(accessGroupRequest);
+            } catch (Exception e) {
+                s_logger.error("attachZone: Failed to create access group on 
storage system for zone with Exception: " + e.getMessage());
+                throw new CloudRuntimeException("attachZone: Failed to create 
access group on storage system for zone with Exception: " + e.getMessage());
+            }
+        }
+        for (HostVO host : hostsToConnect) {
+            try {
+                _storageMgr.connectHostToSharedPool(host, dataStore.getId());
+            } catch (Exception e) {
+                logger.warn("Unable to establish a connection between " + host 
+ " and " + dataStore, e);
+                return  false;
+            }
+        }
+        _dataStoreHelper.attachZone(dataStore);
+        return true;
+    }
+
+    private boolean 
validateProtocolSupportAndFetchHostsIdentifier(List<HostVO> hosts, ProtocolType 
protocolType, List<String> hostIdentifiers) {
+        switch (protocolType) {
+            case ISCSI:
+                String protocolPrefix = Constants.IQN;
+                for (HostVO host : hosts) {
+                    if (host == null || host.getStorageUrl() == null || 
host.getStorageUrl().trim().isEmpty()
+                            || 
!host.getStorageUrl().startsWith(protocolPrefix)) {
+                        return false;
+                    }
+                    hostIdentifiers.add(host.getStorageUrl());
+                }
+                break;
+            case NFS3:
+                String ip = "";
+                for (HostVO host : hosts) {
+                    if (host != null) {
+                        ip =  host.getStorageIpAddress() != null ? 
host.getStorageIpAddress().trim() : "";
+                        if (ip.isEmpty() && host.getPrivateIpAddress() != null 
|| host.getPrivateIpAddress().trim().isEmpty()) {
+                            return false;
+                        } else {
+                            ip = ip.isEmpty() ? 
host.getPrivateIpAddress().trim() : ip;
+                        }

Review Comment:
   In the NFS3 branch, the condition `if (ip.isEmpty() && 
host.getPrivateIpAddress() != null || 
host.getPrivateIpAddress().trim().isEmpty())` has incorrect operator precedence 
and can throw an NPE when `getPrivateIpAddress()` is null. It also returns 
`false` when `ip` is empty but `privateIpAddress` is present (the opposite of 
what you want). Please rewrite this logic with proper null/empty checks and 
parentheses so it correctly falls back to `privateIpAddress` when 
`storageIpAddress` is missing.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java:
##########
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import feign.RequestInterceptor;
+import feign.Retryer;
+import feign.Client;
+import feign.httpclient.ApacheHttpClient;
+import feign.codec.Decoder;
+import feign.codec.Encoder;
+import feign.Response;
+import feign.codec.DecodeException;
+import feign.codec.EncodeException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustAllStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.TimeUnit;
+
+public class FeignConfiguration {
+    private static final Logger logger = 
LogManager.getLogger(FeignConfiguration.class);
+
+    private final int retryMaxAttempt = 3;
+    private final int retryMaxInterval = 5;
+    private final String ontapFeignMaxConnection = "80";
+    private final String ontapFeignMaxConnectionPerRoute = "20";
+    private final ObjectMapper objectMapper;
+
+    public FeignConfiguration() {
+        this.objectMapper = new ObjectMapper();
+        
this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, 
false);
+    }
+
+    public Client createClient() {
+        int maxConn;
+        int maxConnPerRoute;
+        try {
+            maxConn = Integer.parseInt(this.ontapFeignMaxConnection);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection failed, using 
default");
+            maxConn = 20;
+        }
+        try {
+            maxConnPerRoute = 
Integer.parseInt(this.ontapFeignMaxConnectionPerRoute);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection per route 
failed, using default");
+            maxConnPerRoute = 2;
+        }
+        logger.debug("ontapFeignClient: maxConn={}, maxConnPerRoute={}", 
maxConn, maxConnPerRoute);
+        ConnectionKeepAliveStrategy keepAliveStrategy = (response, context) -> 
0;
+        CloseableHttpClient httpClient = HttpClientBuilder.create()
+                .setMaxConnTotal(maxConn)
+                .setMaxConnPerRoute(maxConnPerRoute)
+                .setKeepAliveStrategy(keepAliveStrategy)
+                .setSSLSocketFactory(getSSLSocketFactory())
+                .setConnectionTimeToLive(60, TimeUnit.SECONDS)
+                .build();
+        return new ApacheHttpClient(httpClient);
+    }
+
+    private SSLConnectionSocketFactory getSSLSocketFactory() {
+        try {
+            SSLContext sslContext = 
SSLContexts.custom().loadTrustMaterial(null, new TrustAllStrategy()).build();
+            return new SSLConnectionSocketFactory(sslContext, new 
NoopHostnameVerifier());
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    public RequestInterceptor createRequestInterceptor() {
+        return template -> {
+            logger.info("Feign Request URL: {}", template.url());
+            logger.info("HTTP Method: {}", template.method());
+            logger.info("Headers: {}", template.headers());
+            if (template.body() != null) {
+                logger.info("Body: {}", new String(template.body(), 
StandardCharsets.UTF_8));
+            }
+        };

Review Comment:
   The request interceptor logs full headers and request bodies at INFO. This 
will include the `Authorization` header (Basic auth) and may log sensitive 
payload fields, leaking credentials into logs. Please redact sensitive headers 
(at least `Authorization`) and consider lowering to DEBUG/TRACE or making 
request logging opt-in.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java:
##########
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.Initiator;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class UnifiedSANStrategy extends SANStrategy {
+
+    private static final Logger s_logger = 
LogManager.getLogger(UnifiedSANStrategy.class);
+    private final FeignClientFactory feignClientFactory;
+    private final SANFeignClient sanFeignClient;
+
+    public UnifiedSANStrategy(OntapStorage ontapStorage) {
+        super(ontapStorage);
+        String baseURL = Constants.HTTPS + ontapStorage.getManagementLIF();
+        this.feignClientFactory = new FeignClientFactory();
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public void setOntapStorage(OntapStorage ontapStorage) {
+        this.storage = ontapStorage;
+    }
+
+    @Override
+    public CloudStackVolume createCloudStackVolume(CloudStackVolume 
cloudstackVolume) {
+        return null;
+    }
+
+    @Override
+    CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) 
{
+        return null;
+    }
+
+    @Override
+    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {}
+
+    @Override
+    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {}
+
+    @Override
+    public CloudStackVolume getCloudStackVolume(Map<String, String> values) {
+        return null;
+    }
+
+    @Override
+    public AccessGroup createAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("createAccessGroup : Create Igroup");
+        String igroupName = "unknown";
+        s_logger.debug("createAccessGroup : Creating Igroup with access group 
request {} ", accessGroup);
+        if (accessGroup == null) {
+            s_logger.error("createAccessGroup: Igroup creation failed. Invalid 
request: {}", accessGroup);
+            throw new CloudRuntimeException("createAccessGroup : Failed to 
create Igroup, invalid request");
+        }
+        try {
+            if (accessGroup.getPrimaryDataStoreInfo() == null || 
accessGroup.getPrimaryDataStoreInfo().getDetails() == null
+                    || 
accessGroup.getPrimaryDataStoreInfo().getDetails().isEmpty()) {
+                throw new CloudRuntimeException("createAccessGroup : Failed to 
create Igroup, invalid datastore details in the request");
+            }
+            Map<String, String> dataStoreDetails = 
accessGroup.getPrimaryDataStoreInfo().getDetails();
+            s_logger.debug("createAccessGroup: Successfully fetched datastore 
details.");
+
+            String authHeader = 
Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+            Igroup igroupRequest = new Igroup();
+            List<String> hostsIdentifier = new ArrayList<>();
+            String svmName = dataStoreDetails.get(Constants.SVM_NAME);
+            igroupName = Utility.getIgroupName(svmName, 
accessGroup.getScope().getScopeType(), accessGroup.getScope().getScopeId());
+            Hypervisor.HypervisorType hypervisorType = 
accessGroup.getPrimaryDataStoreInfo().getHypervisor();
+
+            ProtocolType protocol = 
ProtocolType.valueOf(dataStoreDetails.get(Constants.PROTOCOL));
+            if (accessGroup.getHostsToConnect() == null || 
accessGroup.getHostsToConnect().isEmpty()) {
+                throw new CloudRuntimeException("createAccessGroup : Failed to 
create Igroup, no hosts to connect provided in the request");
+            }
+            if 
(!validateProtocolSupportAndFetchHostsIdentifier(accessGroup.getHostsToConnect(),
 protocol, hostsIdentifier)) {
+                String errMsg = "createAccessGroup: Not all hosts in the " +  
accessGroup.getScope().getScopeType().toString()  + " support the protocol: " + 
protocol.name();
+                throw new CloudRuntimeException(errMsg);
+            }
+
+            if (svmName != null && !svmName.isEmpty()) {
+                Svm svm = new Svm();
+                svm.setName(svmName);
+                igroupRequest.setSvm(svm);
+            }
+
+            if (igroupName != null && !igroupName.isEmpty()) {
+                igroupRequest.setName(igroupName);
+            }
+
+            igroupRequest.setOsType(Igroup.OsTypeEnum.linux);
+
+            if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
+                List<Initiator> initiators = new ArrayList<>();
+                for (String hostIdentifier : hostsIdentifier) {
+                    Initiator initiator = new Initiator();
+                    initiator.setName(hostIdentifier);
+                    initiators.add(initiator);
+                }
+                igroupRequest.setInitiators(initiators);
+            }
+            igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf("iscsi"));
+            s_logger.debug("createAccessGroup: About to call 
sanFeignClient.createIgroup with igroupName: {}", igroupName);
+            AccessGroup createdAccessGroup = new AccessGroup();
+            OntapResponse<Igroup> createdIgroup = null;
+            try {
+                createdIgroup = sanFeignClient.createIgroup(authHeader, true, 
igroupRequest);
+            } catch (Exception feignEx) {
+                String errMsg = feignEx.getMessage();
+                if (errMsg != null && errMsg.contains(("5374023"))) {
+                    s_logger.warn("createAccessGroup: Igroup with name {} 
already exists. Fetching existing Igroup.", igroupName);
+                    return createdAccessGroup;
+                }

Review Comment:
   When ONTAP returns error code 5374023 (iGroup already exists), the code logs 
that it will fetch the existing iGroup but then returns an empty `AccessGroup` 
without setting the iGroup. This can hide real issues and makes the return 
value misleading. Either fetch and set the existing iGroup (e.g., via 
`getIgroupResponse`) or clearly document/return null and let callers handle the 
"already exists" case.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java:
##########
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunMap;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+import java.util.Map;
+
+public interface SANFeignClient {
+    // iSCSI Service APIs
+    @RequestLine("GET /api/protocols/san/iscsi/services")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<IscsiService> getIscsiServices(@Param("authHeader") String 
authHeader, @QueryMap Map<String, Object> queryMap);
+
+    // LUN Operation APIs
+    @RequestLine("POST /api/storage/luns?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> createLun(@Param("authHeader") String authHeader, 
@Param("returnRecords") boolean returnRecords, Lun lun);
+
+    @RequestLine("GET /api/storage/luns")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> getLunResponse(@Param("authHeader") String authHeader, 
@QueryMap Map<String, Object> queryMap);
+
+    @RequestLine("GET /{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    Lun getLunByUUID(@Param("authHeader") String authHeader, @Param("uuid") 
String uuid);
+
+    @RequestLine("PATCH /{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    void updateLun(@Param("authHeader") String authHeader, @Param("uuid") 
String uuid, Lun lun);
+
+    @RequestLine("DELETE /api/storage/luns/{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") 
String uuid, @QueryMap Map<String, Object> queryMap);
+
+    // iGroup Operation APIs
+    @RequestLine("POST 
/api/protocols/san/igroups?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Igroup> createIgroup(@Param("authHeader") String authHeader, 
@Param("returnRecords") boolean returnRecords, Igroup igroupRequest);
+
+    @RequestLine("GET /api/protocols/san/igroups")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Igroup> getIgroupResponse(@Param("authHeader") String 
authHeader, @QueryMap Map<String, Object> queryMap);
+
+    @RequestLine("GET /{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    Igroup getIgroupByUUID(@Param("authHeader") String authHeader, 
@Param("uuid") String uuid);

Review Comment:
   `getIgroupByUUID` uses `@RequestLine("GET /{uuid}")`, which looks incorrect 
for ONTAP (it will hit the server root). Use the correct iGroup endpoint path 
(e.g., `/api/protocols/san/igroups/{uuid}`) to avoid calling the wrong URL.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.listener;
+
+import javax.inject.Inject;
+
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import 
org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import com.cloud.host.dao.HostDao;
+
+public class OntapHostListener implements HypervisorHostListener {
+    protected Logger logger = LogManager.getLogger(getClass());
+
+    @Inject
+    private AgentManager _agentMgr;
+    @Inject
+    private AlertManager _alertMgr;
+    @Inject
+    private PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    private HostDao _hostDao;
+    @Inject private StoragePoolHostDao storagePoolHostDao;
+
+
+    @Override
+    public boolean hostConnect(long hostId, long poolId)  {
+        logger.info("Connect to host " + hostId + " from pool " + poolId);
+        Host host = _hostDao.findById(hostId);
+        if (host == null) {
+            logger.error("host was not found with id : {}", hostId);
+            return false;
+        }
+
+        StoragePool pool = _storagePoolDao.findById(poolId);
+        if (pool == null) {
+            logger.error("Failed to connect host - storage pool not found with 
id: {}", poolId);
+            return false;
+        }
+        logger.info("Connecting host {} to ONTAP storage pool {}", 
host.getName(), pool.getName());
+        try {
+            ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, 
pool);
+
+            Answer answer = _agentMgr.easySend(hostId, cmd);
+
+            if (answer == null) {
+                throw new CloudRuntimeException(String.format("Unable to get 
an answer to the modify storage pool command (%s)", pool));
+            }
+
+            if (!answer.getResult()) {
+                String msg = String.format("Unable to attach storage pool %s 
to host %d", pool, hostId);
+
+                _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, 
pool.getDataCenterId(), pool.getPodId(), msg, msg);
+
+                throw new CloudRuntimeException(String.format(
+                        "Unable to establish a connection from agent to 
storage pool %s due to %s", pool, answer.getDetails()));
+            }
+
+            ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) 
answer;
+            StoragePoolInfo poolInfo = mspAnswer.getPoolInfo();
+            if (poolInfo == null) {
+                throw new CloudRuntimeException("ModifyStoragePoolAnswer 
returned null poolInfo");
+            }

Review Comment:
   `answer` is cast directly to `ModifyStoragePoolAnswer` without verifying the 
runtime type. If the agent returns a different `Answer` implementation, this 
will throw a `ClassCastException` and fail the host connect. Consider adding an 
`instanceof` check (as done in other HostListeners) and returning a clear error 
if the answer type is unexpected.



##########
plugins/storage/volume/ontap/pom.xml:
##########
@@ -0,0 +1,169 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>cloud-plugin-storage-volume-ontap</artifactId>
+    <name>Apache CloudStack Plugin - Storage Volume ONTAP Provider</name>
+    <parent>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloudstack-plugins</artifactId>
+        <version>4.23.0.0-SNAPSHOT</version>
+        <relativePath>../../../pom.xml</relativePath>
+    </parent>
+    <properties>
+        <spring-cloud.version>2021.0.7</spring-cloud.version>
+        <openfeign.version>11.0</openfeign.version>
+        <httpclient.version>4.5.14</httpclient.version>
+        <swagger-annotations.version>1.6.2</swagger-annotations.version>
+        <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version>
+        <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version>
+        <jackson-databind.version>2.13.4</jackson-databind.version>
+        <assertj.version>3.24.2</assertj.version>
+        <junit-jupiter.version>5.8.1</junit-jupiter.version>
+        <mockito.version>3.12.4</mockito.version>
+        <mockito-junit-jupiter.version>5.2.0</mockito-junit-jupiter.version>

Review Comment:
   This module hardcodes its own dependency versions (Jackson, JUnit Jupiter, 
Mockito) instead of using the project-wide version properties/BOM (e.g., 
`${cs.jackson.version}`, `${cs.junit.jupiter.version}`, `${cs.mockito.version}` 
in the root pom). This can introduce version skew and dependency convergence 
issues across CloudStack. Consider removing these custom version properties and 
aligning to the parent-managed versions unless there’s a specific compatibility 
requirement.
   ```suggestion
           <jackson-databind.version>${cs.jackson.version}</jackson-databind.version>
           <assertj.version>3.24.2</assertj.version>
           <junit-jupiter.version>${cs.junit.jupiter.version}</junit-jupiter.version>
           <mockito.version>${cs.mockito.version}</mockito.version>
           <mockito-junit-jupiter.version>${cs.mockito.version}</mockito-junit-jupiter.version>
   ```



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java:
##########
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import feign.RequestInterceptor;
+import feign.Retryer;
+import feign.Client;
+import feign.httpclient.ApacheHttpClient;
+import feign.codec.Decoder;
+import feign.codec.Encoder;
+import feign.Response;
+import feign.codec.DecodeException;
+import feign.codec.EncodeException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustAllStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.TimeUnit;
+
+public class FeignConfiguration {
+    private static final Logger logger = 
LogManager.getLogger(FeignConfiguration.class);
+
+    private final int retryMaxAttempt = 3;
+    private final int retryMaxInterval = 5;
+    private final String ontapFeignMaxConnection = "80";
+    private final String ontapFeignMaxConnectionPerRoute = "20";
+    private final ObjectMapper objectMapper;
+
+    public FeignConfiguration() {
+        this.objectMapper = new ObjectMapper();
+        
this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, 
false);
+    }
+
+    public Client createClient() {
+        int maxConn;
+        int maxConnPerRoute;
+        try {
+            maxConn = Integer.parseInt(this.ontapFeignMaxConnection);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection failed, using 
default");
+            maxConn = 20;
+        }
+        try {
+            maxConnPerRoute = 
Integer.parseInt(this.ontapFeignMaxConnectionPerRoute);
+        } catch (Exception e) {
+            logger.error("ontapFeignClient: parse max connection per route 
failed, using default");
+            maxConnPerRoute = 2;
+        }
+        logger.debug("ontapFeignClient: maxConn={}, maxConnPerRoute={}", 
maxConn, maxConnPerRoute);
+        ConnectionKeepAliveStrategy keepAliveStrategy = (response, context) -> 
0;
+        CloseableHttpClient httpClient = HttpClientBuilder.create()
+                .setMaxConnTotal(maxConn)
+                .setMaxConnPerRoute(maxConnPerRoute)
+                .setKeepAliveStrategy(keepAliveStrategy)
+                .setSSLSocketFactory(getSSLSocketFactory())
+                .setConnectionTimeToLive(60, TimeUnit.SECONDS)
+                .build();
+        return new ApacheHttpClient(httpClient);
+    }
+
+    private SSLConnectionSocketFactory getSSLSocketFactory() {
+        try {
+            SSLContext sslContext = 
SSLContexts.custom().loadTrustMaterial(null, new TrustAllStrategy()).build();
+            return new SSLConnectionSocketFactory(sslContext, new 
NoopHostnameVerifier());
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
+        }

Review Comment:
   The Feign HTTP client is configured to trust all TLS certificates and 
disable hostname verification unconditionally (`TrustAllStrategy` + 
`NoopHostnameVerifier`). This undermines HTTPS security for ONTAP credentials 
and traffic. Please make TLS verification the default and only allow skipping 
verification via an explicit, opt-in configuration setting.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java:
##########
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunMap;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+import java.util.Map;
+
+public interface SANFeignClient {
+    // iSCSI Service APIs
+    @RequestLine("GET /api/protocols/san/iscsi/services")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<IscsiService> getIscsiServices(@Param("authHeader") String 
authHeader, @QueryMap Map<String, Object> queryMap);
+
+    // LUN Operation APIs
+    @RequestLine("POST /api/storage/luns?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> createLun(@Param("authHeader") String authHeader, 
@Param("returnRecords") boolean returnRecords, Lun lun);
+
+    @RequestLine("GET /api/storage/luns")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> getLunResponse(@Param("authHeader") String authHeader, 
@QueryMap Map<String, Object> queryMap);
+
+    @RequestLine("GET /{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    Lun getLunByUUID(@Param("authHeader") String authHeader, @Param("uuid") 
String uuid);
+
+    @RequestLine("PATCH /{uuid}")
+    @Headers({"Authorization: {authHeader}"})
+    void updateLun(@Param("authHeader") String authHeader, @Param("uuid") 
String uuid, Lun lun);

Review Comment:
   `getLunByUUID` / `updateLun` use `@RequestLine("GET /{uuid}")` and `PATCH 
/{uuid}` which will call the server root (e.g., `https://<mgmtLIF>/<uuid>`) 
instead of the ONTAP LUN endpoints. These should include the full ONTAP paths 
(consistent with the other methods, e.g., `/api/storage/luns/{uuid}`), 
otherwise the calls will 404 or hit the wrong API.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,453 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+        package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public abstract class StorageStrategy {
+    private final FeignClientFactory feignClientFactory;
+    private final AggregateFeignClient aggregateFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final SvmFeignClient svmFeignClient;
+    private final JobFeignClient jobFeignClient;
+    private final NetworkFeignClient networkFeignClient;
+    private final SANFeignClient sanFeignClient;
+
+    protected OntapStorage storage;
+
+    private List<Aggregate> aggregates;
+
+    private static final Logger s_logger = 
LogManager.getLogger(StorageStrategy.class);
+
+    public StorageStrategy(OntapStorage ontapStorage) {
+        storage = ontapStorage;
+        String baseURL = Constants.HTTPS + storage.getManagementLIF();
+        s_logger.info("Initializing StorageStrategy with base URL: " + 
baseURL);
+        this.feignClientFactory = new FeignClientFactory();
+        this.aggregateFeignClient = 
feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.svmFeignClient = 
feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL);
+        this.networkFeignClient = 
feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public boolean connect() {
+        s_logger.info("Attempting to connect to ONTAP cluster at " + 
storage.getManagementLIF() + " and validate SVM " +
+                storage.getSvmName() + ", protocol " + storage.getProtocol());
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String svmName = storage.getSvmName();
+        try {
+            Svm svm = new Svm();
+            s_logger.info("Fetching the SVM details...");
+            Map<String, Object> queryParams = Map.of(Constants.NAME, svmName, 
Constants.FIELDS, Constants.AGGREGATES +
+                    Constants.COMMA + Constants.STATE);
+            OntapResponse<Svm> svms = 
svmFeignClient.getSvmResponse(queryParams, authHeader);
+            if (svms != null && svms.getRecords() != null && 
!svms.getRecords().isEmpty()) {
+                svm = svms.getRecords().get(0);
+            } else {
+                s_logger.error("No SVM found on the ONTAP cluster by the name" 
+ svmName + ".");
+                return false;
+            }
+
+            s_logger.info("Validating SVM state and protocol settings...");
+            if (!Objects.equals(svm.getState(), Constants.RUNNING)) {
+                s_logger.error("SVM " + svmName + " is not in running state.");
+                return false;
+            }
+            if (Objects.equals(storage.getProtocol(), Constants.NFS) && 
!svm.getNfsEnabled()) {
+                s_logger.error("NFS protocol is not enabled on SVM " + 
svmName);
+                return false;
+            } else if (Objects.equals(storage.getProtocol(), Constants.ISCSI) 
&& !svm.getIscsiEnabled()) {
+                s_logger.error("iSCSI protocol is not enabled on SVM " + 
svmName);
+                return false;
+            }
+            List<Aggregate> aggrs = svm.getAggregates();
+            if (aggrs == null || aggrs.isEmpty()) {
+                s_logger.error("No aggregates are assigned to SVM " + svmName);
+                return false;
+            }
+            for (Aggregate aggr : aggrs) {
+                s_logger.debug("Found aggregate: " + aggr.getName() + " with 
UUID: " + aggr.getUuid());
+                Aggregate aggrResp = 
aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+                if (!Objects.equals(aggrResp.getState(), 
Aggregate.StateEnum.ONLINE)) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " is not in 
online state. Skipping this aggregate.");
+                    continue;
+                } else if (aggrResp.getSpace() == null || 
aggrResp.getAvailableBlockStorageSpace() == null ||
+                        aggrResp.getAvailableBlockStorageSpace() <= 
storage.getSize().doubleValue()) {
+                    s_logger.warn("Aggregate " + aggr.getName() + " does not 
have sufficient available space. Skipping this aggregate.");
+                    continue;
+                }
+                s_logger.info("Selected aggregate: " + aggr.getName() + " for 
volume operations.");
+                this.aggregates = List.of(aggr);
+                break;
+            }
+            if (this.aggregates == null || this.aggregates.isEmpty()) {
+                s_logger.error("No suitable aggregates found on SVM " + 
svmName + " for volume creation.");
+                return false;
+            }
+
+            this.aggregates = aggrs;
+            s_logger.info("Successfully connected to ONTAP cluster and 
validated ONTAP details provided");

Review Comment:
   The aggregate selection logic sets `this.aggregates = List.of(aggr)` when it 
finds a suitable aggregate, but then immediately overwrites it with 
`this.aggregates = aggrs`. This makes the earlier validation/selection 
misleading and can allow later operations to consider aggregates that were just 
deemed unsuitable. Either keep only the filtered/selected aggregates, or remove 
the initial selection loop and rely on `createStorageVolume` to choose the 
aggregate.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java:
##########
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
+import com.cloud.storage.ScopeType;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.springframework.util.Base64Utils;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+import java.util.Map;
+
+public class Utility {
+
+    private static final Logger s_logger = LogManager.getLogger(Utility.class);
+
+    private static final String BASIC = "Basic";
+    private static final String AUTH_HEADER_COLON = ":";
+
+    public static String generateAuthHeader (String username, String password) 
{
+        byte[] encodedBytes = Base64Utils.encode((username + AUTH_HEADER_COLON 
+ password).getBytes());
+        return BASIC + StringUtils.SPACE + new String(encodedBytes);
+    }

Review Comment:
   `generateAuthHeader` uses platform-default encoding for both `getBytes()` 
and `new String(encodedBytes)`, which can lead to incorrect auth headers on 
non-UTF-8 platforms. Use an explicit charset (e.g., UTF-8) for both steps to 
make the header generation deterministic.



##########
plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java:
##########
@@ -0,0 +1,453 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+        package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public abstract class StorageStrategy {
+    private final FeignClientFactory feignClientFactory;
+    private final AggregateFeignClient aggregateFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final SvmFeignClient svmFeignClient;
+    private final JobFeignClient jobFeignClient;
+    private final NetworkFeignClient networkFeignClient;
+    private final SANFeignClient sanFeignClient;
+
+    protected OntapStorage storage;
+
+    private List<Aggregate> aggregates;
+
+    private static final Logger s_logger = 
LogManager.getLogger(StorageStrategy.class);
+
+    public StorageStrategy(OntapStorage ontapStorage) {
+        storage = ontapStorage;
+        String baseURL = Constants.HTTPS + storage.getManagementLIF();
+        s_logger.info("Initializing StorageStrategy with base URL: " + 
baseURL);
+        this.feignClientFactory = new FeignClientFactory();
+        this.aggregateFeignClient = 
feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+        this.volumeFeignClient = 
feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.svmFeignClient = 
feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+        this.jobFeignClient = 
feignClientFactory.createClient(JobFeignClient.class, baseURL);
+        this.networkFeignClient = 
feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+        this.sanFeignClient = 
feignClientFactory.createClient(SANFeignClient.class, baseURL);
+    }
+
+    public boolean connect() {
+        s_logger.info("Attempting to connect to ONTAP cluster at " + 
storage.getManagementLIF() + " and validate SVM " +
+                storage.getSvmName() + ", protocol " + storage.getProtocol());
+        String authHeader = Utility.generateAuthHeader(storage.getUsername(), 
storage.getPassword());
+        String svmName = storage.getSvmName();
+        try {
+            Svm svm = new Svm();
+            s_logger.info("Fetching the SVM details...");
+            Map<String, Object> queryParams = Map.of(Constants.NAME, svmName, 
Constants.FIELDS, Constants.AGGREGATES +
+                    Constants.COMMA + Constants.STATE);
+            OntapResponse<Svm> svms = 
svmFeignClient.getSvmResponse(queryParams, authHeader);
+            if (svms != null && svms.getRecords() != null && 
!svms.getRecords().isEmpty()) {
+                svm = svms.getRecords().get(0);
+            } else {
+                s_logger.error("No SVM found on the ONTAP cluster by the name" 
+ svmName + ".");
+                return false;
+            }
+
+            s_logger.info("Validating SVM state and protocol settings...");
+            if (!Objects.equals(svm.getState(), Constants.RUNNING)) {
+                s_logger.error("SVM " + svmName + " is not in running state.");
+                return false;
+            }
+            if (Objects.equals(storage.getProtocol(), Constants.NFS) && 
!svm.getNfsEnabled()) {
+                s_logger.error("NFS protocol is not enabled on SVM " + 
svmName);
+                return false;
+            } else if (Objects.equals(storage.getProtocol(), Constants.ISCSI) 
&& !svm.getIscsiEnabled()) {
+                s_logger.error("iSCSI protocol is not enabled on SVM " + 
svmName);
+                return false;

Review Comment:
   `storage.getProtocol()` is a `ProtocolType`, but the checks compare it to 
`Constants.NFS`/`Constants.ISCSI` (strings). This condition will never be true, 
so protocol enablement on the SVM is effectively not validated. Compare against 
`ProtocolType.NFS3` / `ProtocolType.ISCSI` (or convert consistently) so an SVM 
with the protocol disabled fails validation as intended.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to