-
Notifications
You must be signed in to change notification settings - Fork 0
Feature/cstackex 22: Shared NFS pool and volume creation - Approach 1 #22
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 24 commits
fbcfed7
ee2197f
d14a23e
419bdb9
8e00054
3fdea75
1860284
71b5ddf
1c33211
899d68e
20371e6
dc95253
ec9f2fd
adc30de
cbc44ba
7db78a1
0064390
982843b
4ad6c71
000cc1d
aae0747
23ddff9
0aea9fb
409d28f
516b553
ef728cd
74e6584
93e66b4
d5e0728
90e6d56
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -64,13 +64,15 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { | |
|
|
||
| @Inject private StoragePoolDetailsDao storagePoolDetailsDao; | ||
| @Inject private PrimaryDataStoreDao storagePoolDao; | ||
|
|
||
| @Override | ||
| public Map<String, String> getCapabilities() { | ||
| s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); | ||
| Map<String, String> mapCapabilities = new HashMap<>(); | ||
|
|
||
| mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); | ||
| mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); | ||
| // RAW managed initial implementation: snapshot features not yet supported | ||
| // TODO Set these to true once we start supporting the snapshot feature | ||
| mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString()); | ||
| mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString()); | ||
|
|
||
| return mapCapabilities; | ||
| } | ||
|
|
@@ -81,9 +83,7 @@ public DataTO getTO(DataObject data) { | |
| } | ||
|
|
||
| @Override | ||
| public DataStoreTO getStoreTO(DataStore store) { | ||
| return null; | ||
| } | ||
| public DataStoreTO getStoreTO(DataStore store) { return null; } | ||
|
|
||
| @Override | ||
| public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) { | ||
|
|
@@ -103,7 +103,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet | |
| s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", | ||
| dataStore, dataObject, dataObject.getType()); | ||
| if (dataObject.getType() == DataObjectType.VOLUME) { | ||
| path = createCloudStackVolumeForTypeVolume(dataStore, dataObject); | ||
| path = createCloudStackVolumeForTypeVolume(dataStore, (VolumeInfo)dataObject); | ||
| createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); | ||
| } else { | ||
| errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; | ||
|
|
@@ -116,11 +116,14 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet | |
| createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); | ||
| createCmdResult.setResult(e.toString()); | ||
| } finally { | ||
| if (createCmdResult != null && createCmdResult.isSuccess()) { | ||
| s_logger.info("createAsync: Volume created successfully. Path: {}", path); | ||
| } | ||
| callback.complete(createCmdResult); | ||
| } | ||
| } | ||
|
|
||
| private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) { | ||
| private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) { | ||
| StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); | ||
| if(storagePool == null) { | ||
| s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); | ||
|
|
@@ -129,20 +132,43 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObje | |
| Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); | ||
| StorageStrategy storageStrategy = getStrategyByStoragePoolDetails(details); | ||
| s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); | ||
| CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject); | ||
| CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); | ||
| CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); | ||
| if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { | ||
| return cloudStackVolume.getLun().getName(); | ||
| } else if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { | ||
| return volumeObject.getUuid(); // return the volume UUID for agent as path for mounting | ||
| } else { | ||
| String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + dataObject; | ||
| String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + volumeObject; | ||
| s_logger.error(errMsg); | ||
| throw new CloudRuntimeException(errMsg); | ||
| } | ||
| } | ||
|
|
||
| @Override | ||
| public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback) { | ||
|
|
||
| CommandResult commandResult = new CommandResult(); | ||
| try { | ||
| if (store == null || data == null) { | ||
| throw new CloudRuntimeException("deleteAsync: store or data is null"); | ||
| } | ||
| if (data.getType() == DataObjectType.VOLUME) { | ||
| StoragePoolVO storagePool = storagePoolDao.findById(store.getId()); | ||
| if(storagePool == null) { | ||
| s_logger.error("deleteAsync : Storage Pool not found for id: " + store.getId()); | ||
| throw new CloudRuntimeException("deleteAsync : Storage Pool not found for id: " + store.getId()); | ||
| } | ||
| Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); | ||
| if (ProtocolType.NFS.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { | ||
|
||
| // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST. | ||
| s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId()); | ||
| } | ||
| } | ||
| } catch (Exception e) { | ||
| commandResult.setResult(e.getMessage()); | ||
| } finally { | ||
| callback.complete(commandResult); | ||
| } | ||
| } | ||
|
|
||
| @Override | ||
|
|
@@ -177,7 +203,6 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore | |
|
|
||
| @Override | ||
| public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { | ||
|
|
||
| } | ||
|
|
||
| @Override | ||
|
|
@@ -217,7 +242,7 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual | |
|
|
||
| @Override | ||
| public boolean canProvideStorageStats() { | ||
| return true; | ||
| return false; | ||
| } | ||
|
|
||
| @Override | ||
|
|
@@ -227,7 +252,7 @@ public Pair<Long, Long> getStorageStats(StoragePool storagePool) { | |
|
|
||
| @Override | ||
| public boolean canProvideVolumeStats() { | ||
| return true; | ||
| return false; // Not yet implemented for RAW managed NFS | ||
| } | ||
|
|
||
| @Override | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,3 +1,22 @@ | ||
| /* | ||
| * Licensed to the Apache Software Foundation (ASF) under one | ||
| * or more contributor license agreements. See the NOTICE file | ||
| * distributed with this work for additional information | ||
| * regarding copyright ownership. The ASF licenses this file | ||
| * to you under the Apache License, Version 2.0 (the | ||
| * "License"); you may not use this file except in compliance | ||
| * with the License. You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, | ||
| * software distributed under the License is distributed on an | ||
| * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
| * KIND, either express or implied. See the License for the | ||
| * specific language governing permissions and limitations | ||
| * under the License. | ||
| */ | ||
|
|
||
| package org.apache.cloudstack.storage.feign; | ||
|
|
||
| import feign.RequestInterceptor; | ||
|
|
@@ -11,7 +30,7 @@ | |
| import feign.codec.EncodeException; | ||
| import com.fasterxml.jackson.core.JsonProcessingException; | ||
| import com.fasterxml.jackson.databind.DeserializationFeature; | ||
| import com.fasterxml.jackson.databind.json.JsonMapper; | ||
| import com.fasterxml.jackson.databind.ObjectMapper; | ||
| import org.apache.http.conn.ConnectionKeepAliveStrategy; | ||
| import org.apache.http.conn.ssl.NoopHostnameVerifier; | ||
| import org.apache.http.conn.ssl.SSLConnectionSocketFactory; | ||
|
|
@@ -36,13 +55,11 @@ public class FeignConfiguration { | |
| private final int retryMaxInterval = 5; | ||
| private final String ontapFeignMaxConnection = "80"; | ||
| private final String ontapFeignMaxConnectionPerRoute = "20"; | ||
| private final JsonMapper jsonMapper; | ||
| private final ObjectMapper jsonMapper; | ||
|
|
||
| public FeignConfiguration() { | ||
| this.jsonMapper = JsonMapper.builder() | ||
| .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) | ||
| .findAndAddModules() | ||
| .build(); | ||
| this.jsonMapper = new ObjectMapper(); | ||
| this.jsonMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); | ||
| } | ||
|
|
||
| public Client createClient() { | ||
|
|
@@ -120,16 +137,36 @@ public Decoder createDecoder() { | |
| @Override | ||
| public Object decode(Response response, Type type) throws IOException, DecodeException { | ||
| if (response.body() == null) { | ||
| logger.debug("Response body is null, returning null"); | ||
| return null; | ||
| } | ||
| String json = null; | ||
| try (InputStream bodyStream = response.body().asInputStream()) { | ||
| json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8); | ||
| logger.debug("Decoding JSON response: {}", json); | ||
| return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type)); | ||
| Object result = null; | ||
| try { | ||
| var javaType = jsonMapper.getTypeFactory().constructType(type); | ||
| result = jsonMapper.readValue(json, javaType); | ||
| logger.debug("jsonMapper.readValue() completed successfully"); | ||
| } catch (Throwable ex) { | ||
| logger.error("EXCEPTION in jsonMapper.readValue()! Type: {}, Message: {}", ex.getClass().getName(), ex.getMessage(), ex); | ||
|
||
| throw ex; | ||
| } | ||
|
|
||
| if (result == null) { | ||
| logger.warn("Decoded result is null!"); | ||
| } else { | ||
| logger.debug("Successfully decoded to object of type: {}", result.getClass().getName()); | ||
| } | ||
| logger.debug("Returning result from decoder"); | ||
| return result; | ||
| } catch (IOException e) { | ||
| logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e); | ||
| logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e); | ||
| throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e); | ||
| } catch (Exception e) { | ||
| logger.error("Unexpected error during decoding. Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e); | ||
| throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e); | ||
| } | ||
| } | ||
| }; | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.