Skip to content

Commit 1031c31

Browse files
author
Rene Glover
authored
FiberChannel Multipath for KVM + Pure Flash Array and HPE-Primera Support (#7889)
This PR provides a new primary storage volume type called "FiberChannel" that allows access to volumes connected to hosts over fiber channel connections. It requires Multipath to provide path discovery and failover. Second, the PR adds an AdaptivePrimaryDatastoreProvider that abstracts how volumes are managed/orchestrated from the connector used to communicate with the primary storage provider, using a ProviderAdapter interface, allowing the code interacting with the primary storage provider APIs to be simpler and have no direct dependencies on CloudStack code. Lastly, the PR provides an implementation of the ProviderAdapter classes for the HP Enterprise Primera line of storage solutions and the Pure Flash Array line of storage solutions.
1 parent 4e46f5a commit 1031c31

File tree

103 files changed

+10464
-152
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

103 files changed

+10464
-152
lines changed

api/src/main/java/com/cloud/storage/Storage.java

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,13 +77,18 @@ public String getFileExtension() {
7777
}
7878

7979
public static enum Capability {
80-
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION");
80+
HARDWARE_ACCELERATION("HARDWARE_ACCELERATION"),
81+
ALLOW_MIGRATE_OTHER_POOLS("ALLOW_MIGRATE_OTHER_POOLS");
8182

8283
private final String capability;
8384

8485
private Capability(String capability) {
8586
this.capability = capability;
8687
}
88+
89+
public String toString() {
90+
return this.capability;
91+
}
8792
}
8893

8994
public static enum ProvisioningType {
@@ -150,7 +155,8 @@ public static enum StoragePoolType {
150155
ManagedNFS(true, false, false),
151156
Linstor(true, true, false),
152157
DatastoreCluster(true, true, false), // for VMware, to abstract pool of clusters
153-
StorPool(true, true, true);
158+
StorPool(true, true, true),
159+
FiberChannel(true, true, false); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-<wwnvalue>)
154160

155161
private final boolean shared;
156162
private final boolean overprovisioning;

api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
package org.apache.cloudstack.api.command.admin.storage;
1818

1919
import java.util.List;
20+
import java.util.Map;
2021

2122
import org.apache.cloudstack.api.ApiCommandResourceType;
2223
import org.apache.log4j.Logger;
@@ -32,6 +33,7 @@
3233
import com.cloud.storage.StoragePool;
3334
import com.cloud.user.Account;
3435

36+
@SuppressWarnings("rawtypes")
3537
@APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0",
3638
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
3739
public class UpdateStoragePoolCmd extends BaseCmd {
@@ -61,6 +63,20 @@ public class UpdateStoragePoolCmd extends BaseCmd {
6163
" enable it back.")
6264
private Boolean enabled;
6365

66+
@Parameter(name = ApiConstants.DETAILS,
67+
type = CommandType.MAP,
68+
required = false,
69+
description = "the details for the storage pool",
70+
since = "4.19.0")
71+
private Map details;
72+
73+
@Parameter(name = ApiConstants.URL,
74+
type = CommandType.STRING,
75+
required = false,
76+
description = "the URL of the storage pool",
77+
since = "4.19.0")
78+
private String url;
79+
6480
@Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE)
6581
private Boolean isTagARule;
6682

@@ -115,6 +131,22 @@ public ApiCommandResourceType getApiResourceType() {
115131
return ApiCommandResourceType.StoragePool;
116132
}
117133

134+
public Map<String,String> getDetails() {
135+
return details;
136+
}
137+
138+
public void setDetails(Map<String,String> details) {
139+
this.details = details;
140+
}
141+
142+
public String getUrl() {
143+
return url;
144+
}
145+
146+
public void setUrl(String url) {
147+
this.url = url;
148+
}
149+
118150
@Override
119151
public void execute() {
120152
StoragePool result = _storageService.updateStoragePool(this);

client/pom.xml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,16 @@
111111
<artifactId>cloud-plugin-storage-volume-storpool</artifactId>
112112
<version>${project.version}</version>
113113
</dependency>
114+
<dependency>
115+
<groupId>org.apache.cloudstack</groupId>
116+
<artifactId>cloud-plugin-storage-volume-primera</artifactId>
117+
<version>${project.version}</version>
118+
</dependency>
119+
<dependency>
120+
<groupId>org.apache.cloudstack</groupId>
121+
<artifactId>cloud-plugin-storage-volume-flasharray</artifactId>
122+
<version>${project.version}</version>
123+
</dependency>
114124
<dependency>
115125
<groupId>org.apache.cloudstack</groupId>
116126
<artifactId>cloud-server</artifactId>

engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2957,6 +2957,7 @@ protected Map<Volume, StoragePool> buildMapUsingUserInformation(VirtualMachinePr
29572957
* <ul>
29582958
* <li> If the current storage pool of the volume is not a managed storage, we do not need to validate anything here.
29592959
* <li> If the current storage pool is a managed storage and the target storage pool ID is different from the current one, we throw an exception.
2960+
* <li> If the current storage pool is a managed storage and it has explicitly declared that it is capable of migration to alternate storage pools, the migration is allowed.
29602961
* </ul>
29612962
*/
29622963
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
@@ -2966,6 +2967,11 @@ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StorageP
29662967
if (currentPool.getId() == targetPool.getId()) {
29672968
return;
29682969
}
2970+
2971+
Map<String, String> details = _storagePoolDao.getDetails(currentPool.getId());
2972+
if (details != null && Boolean.parseBoolean(details.get(Storage.Capability.ALLOW_MIGRATE_OTHER_POOLS.toString()))) {
2973+
return;
2974+
}
29692975
throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].",
29702976
volume.getUuid(), currentPool.getUuid(), targetPool.getUuid()));
29712977
}

engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java

Lines changed: 20 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo
193193
destData.getType() == DataObjectType.TEMPLATE)) {
194194
// volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools
195195
// Delete cache in order to certainly transfer a latest image.
196-
s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
196+
if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId +
197197
", uuid: " + cacheUuid + ")");
198198
cacheMgr.deleteCacheObject(srcForCopy);
199199
} else {
@@ -205,15 +205,15 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo
205205
", uuid: " + cacheUuid + ")");
206206
cacheMgr.deleteCacheObject(srcForCopy);
207207
} else {
208-
s_logger.debug("Decrease reference count of " + cacheType +
208+
if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType +
209209
" cache(id: " + cacheId + ", uuid: " + cacheUuid + ")");
210210
cacheMgr.releaseCacheObject(srcForCopy);
211211
}
212212
}
213213
}
214214
return answer;
215215
} catch (Exception e) {
216-
s_logger.debug("copy object failed: ", e);
216+
if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e);
217217
if (cacheData != null) {
218218
cacheMgr.deleteCacheObject(cacheData);
219219
}
@@ -331,7 +331,7 @@ protected Answer cloneVolume(DataObject template, DataObject volume) {
331331
}
332332
return answer;
333333
} catch (Exception e) {
334-
s_logger.debug("Failed to send to storage pool", e);
334+
if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e);
335335
throw new CloudRuntimeException("Failed to send to storage pool", e);
336336
}
337337
}
@@ -388,7 +388,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData)
388388

389389
if (answer == null || !answer.getResult()) {
390390
if (answer != null) {
391-
s_logger.debug("copy to image store failed: " + answer.getDetails());
391+
if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails());
392392
}
393393
objOnImageStore.processEvent(Event.OperationFailed);
394394
imageStore.delete(objOnImageStore);
@@ -411,7 +411,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData)
411411

412412
if (answer == null || !answer.getResult()) {
413413
if (answer != null) {
414-
s_logger.debug("copy to primary store failed: " + answer.getDetails());
414+
if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails());
415415
}
416416
objOnImageStore.processEvent(Event.OperationFailed);
417417
imageStore.delete(objOnImageStore);
@@ -471,13 +471,17 @@ protected Answer migrateVolumeToPool(DataObject srcData, DataObject destData) {
471471
s_logger.error(errMsg);
472472
answer = new Answer(command, false, errMsg);
473473
} else {
474+
if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep);
474475
answer = ep.sendMessage(command);
476+
if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer);
475477
}
476478

477479
if (answer == null || !answer.getResult()) {
478480
throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool);
479481
} else {
480482
// Update the volume details after migration.
483+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume");
484+
481485
VolumeVO volumeVo = volDao.findById(volume.getId());
482486
Long oldPoolId = volume.getPoolId();
483487
volumeVo.setPath(((MigrateVolumeAnswer)answer).getVolumePath());
@@ -496,6 +500,8 @@ protected Answer migrateVolumeToPool(DataObject srcData, DataObject destData) {
496500
}
497501
volumeVo.setFolder(folder);
498502
volDao.update(volume.getId(), volumeVo);
503+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete");
504+
499505
}
500506

501507
return answer;
@@ -507,7 +513,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As
507513
Answer answer = null;
508514
String errMsg = null;
509515
try {
510-
s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
516+
if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
511517
if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
512518
answer = copyVolumeFromSnapshot(srcData, destData);
513519
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) {
@@ -516,11 +522,16 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As
516522
answer = cloneVolume(srcData, destData);
517523
} else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME &&
518524
srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) {
525+
if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources");
519526
if (srcData.getId() == destData.getId()) {
520527
// The volume has to be migrated across storage pools.
528+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING");
521529
answer = migrateVolumeToPool(srcData, destData);
530+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult());
522531
} else {
532+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING");
523533
answer = copyVolumeBetweenPools(srcData, destData);
534+
if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult());
524535
}
525536
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) {
526537
answer = copySnapshot(srcData, destData);
@@ -532,7 +543,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As
532543
errMsg = answer.getDetails();
533544
}
534545
} catch (Exception e) {
535-
s_logger.debug("copy failed", e);
546+
if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e);
536547
errMsg = e.toString();
537548
}
538549
CopyCommandResult result = new CopyCommandResult(null, answer);
@@ -627,7 +638,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) {
627638
}
628639
return answer;
629640
} catch (Exception e) {
630-
s_logger.debug("copy snasphot failed: ", e);
641+
if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e);
631642
if (cacheData != null) {
632643
cacheMgr.deleteCacheObject(cacheData);
633644
}

0 commit comments

Comments
 (0)