Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -129,10 +129,12 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details);
s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject);
CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, (VolumeInfo) dataObject);
CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) {
return cloudStackVolume.getLun().getName();
} else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
return cloudStackVolume.getFile().getName();
} else {
String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + dataObject;
s_logger.error(errMsg);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,39 +26,39 @@
import feign.Param;
import feign.RequestLine;

//TODO: Proper URLs should be added in the RequestLine annotations below
public interface NASFeignClient {

// File Operations
@RequestLine("GET /{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<FileInfo> getFileResponse(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);

@RequestLine("DELETE /{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void deleteFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);

@RequestLine("PATCH /{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void updateFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath, FileInfo fileInfo);
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath,
FileInfo fileInfo);

@RequestLine("POST /{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void createFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath, FileInfo file);
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath,
FileInfo file);

// Export Policy Operations
@RequestLine("POST /")
@Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
@Headers({"Authorization: {authHeader}"})
ExportPolicy createExportPolicy(@Param("authHeader") String authHeader,
@Param("returnRecords") boolean returnRecords,
ExportPolicy exportPolicy);

@RequestLine("GET /")
Expand All @@ -68,16 +68,16 @@ ExportPolicy createExportPolicy(@Param("authHeader") String authHeader,
@RequestLine("GET /{id}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<ExportPolicy> getExportPolicyById(@Param("authHeader") String authHeader,
@Param("id") String id);
@Param("id") String id);

@RequestLine("DELETE /{id}")
@Headers({"Authorization: {authHeader}"})
void deleteExportPolicyById(@Param("authHeader") String authHeader,
@Param("id") String id);
@Param("id") String id);

@RequestLine("PATCH /{id}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<ExportPolicy> updateExportPolicy(@Param("authHeader") String authHeader,
@Param("id") String id,
ExportPolicy request);
@Param("id") String id,
ExportPolicy request);
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,15 @@
*/
package org.apache.cloudstack.storage.feign.client;

import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;

import java.util.Map;

public interface VolumeFeignClient {

Expand All @@ -38,8 +42,12 @@ public interface VolumeFeignClient {
@Headers({"Authorization: {authHeader}"})
Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);

@RequestLine("GET /api/storage/volumes")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Volume> getVolume(@Param("authHeader") String authHeader, @QueryMap Map<String, Object> queryMap);

@RequestLine("PATCH /api/storage/volumes/{uuid}")
@Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
@Headers({ "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest);
Comment on lines +50 to +51
Copy link

Copilot AI Nov 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Removed 'acceptHeader' parameter from method signature. This is a breaking change that could affect existing callers of this method.

Suggested change
@Headers({ "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest);
@Headers({ "Authorization: {authHeader}", "Accept: {acceptHeader}" })
JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);

Copilot uses AI. Check for mistakes.
}

Empty file.
Empty file.
Empty file.
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,13 @@
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Utility;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
Expand Down Expand Up @@ -184,7 +187,7 @@ public DataStore initialize(Map<String, Object> dsInfos) {
String path;
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
switch (protocol) {
case NFS3:
case NFS:
parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName;
s_logger.info("Setting NFS path for storage pool: " + path);
Expand Down Expand Up @@ -213,7 +216,9 @@ public DataStore initialize(Map<String, Object> dsInfos) {
long volumeSize = Long.parseLong(details.get(Constants.SIZE));
s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
(volumeSize / (1024 * 1024 * 1024)) + " GB)");
storageStrategy.createStorageVolume(storagePoolName, volumeSize);
Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize);
details.put(Constants.VOLUME_UUID, volume.getUuid());
details.put(Constants.VOLUME_NAME, volume.getName());
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}
Expand Down Expand Up @@ -241,10 +246,20 @@ public DataStore initialize(Map<String, Object> dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);

logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId()));

Map<String, String> details = primaryStore.getDetails(); // TODO: verify during testing; if this map is already populated we can remove the DB call below
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
AccessGroup accessGroupRequest = new AccessGroup();
accessGroupRequest.setHostsToConnect(hostsToConnect);
accessGroupRequest.setScope(scope);
accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
strategy.createAccessGroup(accessGroupRequest);

logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
Copy link

Copilot AI Nov 14, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nitpick] Missing space after the semicolon on line 282. Add a space between '//' and 'setting' for consistent comment formatting.

Copilot uses AI. Check for mistakes.
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
Expand All @@ -265,9 +280,18 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);

PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));

Map<String, String> details = primaryStore.getDetails(); // TODO: verify during testing; if this map is already populated we can remove the DB call below
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
AccessGroup accessGroupRequest = new AccessGroup();
accessGroupRequest.setHostsToConnect(hostsToConnect);
accessGroupRequest.setScope(scope);
accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
strategy.createAccessGroup(accessGroupRequest);
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
package org.apache.cloudstack.storage.provider;

import com.cloud.exception.StorageConflictException;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;

/**
 * Stub {@link HypervisorHostListener} for the ONTAP primary storage provider.
 *
 * Every callback currently returns {@code false} without doing any work —
 * presumably placeholders until real host attach/detach handling is implemented.
 * NOTE(review): confirm whether returning {@code false} here is treated by the
 * storage manager as "not handled" or as a hard failure for ONTAP pools.
 */
class OntapHostListener implements HypervisorHostListener {

// Invoked when a host is added to CloudStack; stub — performs no ONTAP-side setup.
@Override
public boolean hostAdded(long hostId) {
return false;
}

// Invoked when a host connects to the given storage pool; stub — no igroup/export
// registration is performed yet (declared StorageConflictException is never thrown).
@Override
public boolean hostConnect(long hostId, long poolId) throws StorageConflictException {
return false;
}

// Invoked when a host disconnects from the given storage pool; stub — no cleanup.
@Override
public boolean hostDisconnected(long hostId, long poolId) {
return false;
}

// Invoked just before a host is removed from CloudStack; stub — no pre-removal work.
@Override
public boolean hostAboutToBeRemoved(long hostId) {
return false;
}

// Invoked after a host has been removed from the given cluster; stub — no cleanup.
@Override
public boolean hostRemoved(long hostId, long clusterId) {
return false;
}

// Invoked when a host is (re-)enabled; stub — no re-registration performed.
@Override
public boolean hostEnabled(long hostId) {
return false;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class);
private OntapPrimaryDatastoreDriver primaryDatastoreDriver;
private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle;
private OntapHostListener ontapHostListener;

public OntapPrimaryDatastoreProvider() {
s_logger.info("OntapPrimaryDatastoreProvider initialized");
Expand All @@ -57,7 +58,7 @@ public DataStoreDriver getDataStoreDriver() {

@Override
public HypervisorHostListener getHostListener() {
return null;
return ontapHostListener;
}

@Override
Expand All @@ -71,6 +72,8 @@ public boolean configure(Map<String, Object> params) {
s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called");
primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class);
primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class);
ontapHostListener = ComponentContext.inject(OntapHostListener.class);

return true;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
ProtocolType protocol = ontapStorage.getProtocol();
s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol);
switch (protocol) {
case NFS3:
case NFS:
if (!ontapStorage.getIsDisaggregated()) {
UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
unifiedNASStrategy.setOntapStorage(ontapStorage);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,11 @@ public Volume createStorageVolume(String volumeName, Long size) {
volumeRequest.setSize(size);
// Make the POST API call to create the volume
try {
// Create URI for POST CreateVolume API
// Call the VolumeFeignClient to create the volume
/*
ONTAP creates a default export rule of 0.0.0.0 if no export rules are defined when the volume is created.
Since CloudStack is not aware of the hosts at storage-pool creation time, we can either create a default
or a permissive rule here and update it later as part of the attachCluster or attachZone implementation.
*/
JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest);
if (jobResponse == null || jobResponse.getJob() == null) {
throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName);
Expand Down Expand Up @@ -192,8 +195,20 @@ public Volume createStorageVolume(String volumeName, Long size) {
throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
}
s_logger.info("Volume created successfully: " + volumeName);
//TODO
return null;
// The code below fetches the created volume once so that its UUID can be stored in the storage-pool
// mapping; subsequent workflows can then reuse the mapping instead of issuing their own get-volume calls.
OntapResponse<Volume> ontapVolume = new OntapResponse<>();
try {
Map<String, Object> queryParams = Map.of(Constants.NAME, volumeName);
ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams);
if ((ontapVolume == null || ontapVolume.getRecords().isEmpty())) {
s_logger.error("Exception while getting volume volume not found:");
throw new CloudRuntimeException("Failed to fetch volume " + volumeName);
}
}catch (Exception e) {
s_logger.error("Exception while getting volume: " + e.getMessage());
throw new CloudRuntimeException("Failed to fetch volume: " + e.getMessage());
}
return ontapVolume.getRecords().get(0);
}

/**
Expand Down Expand Up @@ -287,7 +302,7 @@ public Volume getStorageVolume(Volume volume)
* @param accessGroup the access group to create
* @return the created AccessGroup object
*/
abstract AccessGroup createAccessGroup(AccessGroup accessGroup);
abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
Copy link

Copilot AI Nov 7, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The modifier 'public' should come before 'abstract' following Java convention. Change to 'public abstract AccessGroup createAccessGroup(AccessGroup accessGroup);'.

Suggested change
abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
public abstract AccessGroup createAccessGroup(AccessGroup accessGroup);

Copilot uses AI. Check for mistakes.

/**
* Method encapsulates the behavior based on the opted protocol in subclasses
Expand Down
Loading
Loading