rg9975 commented on code in PR #7889:
URL: https://github.com/apache/cloudstack/pull/7889#discussion_r1374845375
########## plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java: ##########
@@ -0,0 +1,871 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.datastore.driver;
+
+import java.util.Map;
+import javax.inject.Inject;
+import org.apache.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.command.CreateObjectAnswer;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
+import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStorageStats;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
+import org.apache.cloudstack.storage.to.SnapshotObjectTO;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.cloudstack.storage.volume.VolumeObject;
+import org.apache.cloudstack.storage.snapshot.SnapshotObject;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.domain.DomainVO;
+import com.cloud.domain.dao.DomainDao;
+import com.cloud.host.Host;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.projects.dao.ProjectDao;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.ResizeVolumePayload;
+import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.Storage.ImageFormat;
+
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.user.AccountManager;
+import com.cloud.user.AccountVO;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl {
+
+    static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class);
+
+    private String providerName = null;
+
+    @Inject
+    AccountManager _accountMgr;
+    @Inject
+    DiskOfferingDao _diskOfferingDao;
+    @Inject
+    VolumeDao _volumeDao;
+    @Inject
+    PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    ProjectDao _projectDao;
+    @Inject
+    SnapshotDataStoreDao _snapshotDataStoreDao;
+    @Inject
+    SnapshotDetailsDao _snapshotDetailsDao;
+    @Inject
+    VolumeDetailsDao _volumeDetailsDao;
+    @Inject
+    VMTemplatePoolDao _vmTemplatePoolDao;
+    @Inject
+    AccountDao _accountDao;
+    @Inject
+    StoragePoolDetailsDao _storagePoolDetailsDao;
+    @Inject
+    SnapshotDao _snapshotDao;
+    @Inject
+    VMTemplateDao _vmTemplateDao;
+    @Inject
+    DataCenterDao _datacenterDao;
+    @Inject
+    DomainDao _domainDao;
+
+    private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
+
+    public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) {
+        this._adapterFactoryMap = factoryMap;
+    }
+
+    @Override
+    public DataTO getTO(DataObject data) {
+        return null;
+    }
+
+    @Override
+    public DataStoreTO getStoreTO(DataStore store) {
+        return null;
+    }
+
+    public ProviderAdapter getAPI(StoragePool pool, Map<String, String> details) {
+        return _adapterFactoryMap.getAPI(pool.getUuid(), pool.getStorageProviderName(), details);
+    }
+
+    @Override
+    public void createAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateCmdResult result = null;
+        try {
+            s_logger.info("Volume creation starting for data store [" + dataStore.getName()
+                    + "] and data object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]");
+
+            // quota size of the cloudbyte volume will be increased with the given
+            // HypervisorSnapshotReserve
+            Long volumeSizeBytes = dataObject.getSize();
+            // cloudstack talks bytes, primera talks MiB
+            StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+            Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+
+            ProviderAdapter api = getAPI(storagePool, details);
+            ProviderAdapterContext context = newManagedVolumeContext(dataObject);
+            ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
+            ProviderAdapterDiskOffering inDiskOffering = null;
+            // only get the offering if it's a volume type; if it's a template type we skip this
+            if (DataObjectType.VOLUME.equals(dataObject.getType())) {
+                // get the disk offering as the provider may need to see its details to
+                // provision the correct type of volume
+                VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
+                DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeVO.getDiskOfferingId());
+                if (diskOffering.isUseLocalStorage()) {
+                    throw new CloudRuntimeException(
+                            "Disk offering requires local storage but this storage provider does not support local storage. Please contact the cloud administrator to have the disk offering configuration updated to avoid this conflict.");
+                }
+                inDiskOffering = new ProviderAdapterDiskOffering(diskOffering);
+            }
+
+            // if it's a template and it already exists, just return the info -- it may mean a previous attempt to
+            // copy this template failed after volume creation and its state has not advanced yet
+            ProviderVolume volume = null;
+            if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
+                volume = api.getVolume(context, dataIn);
+                if (volume != null) {
+                    s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]");
+                }
+            }
+
+            // create the volume if it didn't already exist
+            if (volume == null) {
+                // clunky - if this fails AND this detail property is set, it means upstream may have already created it
+                // in VolumeService and DataMotionStrategy tries to do it again before copying...
+                try {
+                    volume = api.create(context, dataIn, inDiskOffering, volumeSizeBytes);
+                } catch (Exception e) {
+                    VolumeDetailVO csId = _volumeDetailsDao.findDetail(dataObject.getId(), "cloneOfTemplate");
+                    if (csId != null && csId.getId() > 0) {
+                        volume = api.getVolume(context, dataIn);
+                    } else {
+                        throw e;
+                    }
+                }
+                s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]");
+            }
+
+            // set these from the discovered or created volume before proceeding
+            dataIn.setExternalName(volume.getExternalName());
+            dataIn.setExternalUuid(volume.getExternalUuid());
+
+            // add the volume to the host set
+            String connectionId = api.attach(context, dataIn);
+
+            // update the cloudstack metadata about the volume
+            persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
+
+            result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
+            result.setSuccess(true);
+            s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]");
+        } catch (Throwable e) {
+            s_logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e);
+            result = new CreateCmdResult(null, new Answer(null));
+            result.setResult(e.toString());
+            result.setSuccess(false);
+            throw new CloudRuntimeException(e.getMessage());
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void deleteAsync(DataStore dataStore, DataObject dataObject,
+            AsyncCompletionCallback<CommandResult> callback) {
+        s_logger.debug("Delete volume started");
+        CommandResult result = new CommandResult();
+        try {
+            StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
+            Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+            ProviderAdapter api = getAPI(storagePool, details);
+            ProviderAdapterContext context = newManagedVolumeContext(dataObject);
+            ProviderAdapterDataObject inData = newManagedDataObject(dataObject, storagePool);
+            // skip the adapter delete if neither external identifier is set; it probably means the volume
+            // create failed before these could be set
+            if (!(inData.getExternalName() == null && inData.getExternalUuid() == null)) {
+                api.delete(context, inData);
+            }
+            result.setResult("Successfully deleted volume");
+            result.setSuccess(true);
+        } catch (Throwable e) {
+            s_logger.error("Volume delete failed with exception", e);
+            result.setResult(e.toString());
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void copyAsync(DataObject srcdata, DataObject destdata,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = null;
+        try {
+            s_logger.info("Copying volume [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
+
+            if (!canCopy(srcdata, destdata)) {
+                throw new CloudRuntimeException(
+                        "The data store provider is unable to perform copy operations because the source or destination object is not the correct type of volume");
+            }
+
+            try {
+                StoragePoolVO storagePool = _storagePoolDao.findById(srcdata.getDataStore().getId());
+                Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
+                ProviderAdapter api = getAPI(storagePool, details);
+
+                s_logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid());
+
+                ProviderVolume outVolume;
+                ProviderAdapterContext context = newManagedVolumeContext(destdata);
+                ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
+                ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
+                outVolume = api.copy(context, sourceIn, destIn);
+
+                String connectionId = api.attach(context, destIn);
+
+                String finalPath;
+                // format: type=fiberwwn; address=<address>; connid=<connid>
+                if (connectionId != null) {
+                    finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(),
+                            outVolume.getAddress().toLowerCase(), connectionId);
+                } else {
+                    finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(),
+                            outVolume.getAddress().toLowerCase());
+                }
+
+                persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
+                s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
+
+                VolumeObjectTO voto = new VolumeObjectTO();
+                voto.setPath(finalPath);
+
+                result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto));
+                result.setSuccess(true);
+            } catch (Throwable e) {
+                s_logger.error("Volume copy failed with exception", e);
+                result = new CopyCommandResult(null, null);
+                result.setSuccess(false);
+                result.setResult(e.toString());
+            }
+        } finally {
+            if (callback != null)
+                callback.complete(result);
+        }
+    }
+
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        copyAsync(srcData, destData, callback);
+    }
+
+    @Override
+    public boolean canCopy(DataObject srcData, DataObject destData) {
+        s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":"
+                + srcData.getDataStore().getId() + "] AND destData ["
+                + destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]");
+        try {
+            if (!isSameProvider(srcData)) { // TODO: change to generic
+                s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
+                return false;
+            }
+
+            if (!isSameProvider(destData)) {
+                s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
+                return false;
+            }
+            s_logger.debug(
+                    "canCopy: Source and destination are the same, so we can copy via the storage endpoint; checking that the source actually exists");
+            StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
+            Map<String, String> details = _storagePoolDao.getDetails(srcData.getDataStore().getId());
+            ProviderAdapter api = getAPI(poolVO, details);
+
+            /**
+             * The storage provider generates its own names for snapshots, which we store and
+             * retrieve when needed
+             */
+            ProviderAdapterContext context = newManagedVolumeContext(srcData);
+            ProviderAdapterDataObject srcDataObject = newManagedDataObject(srcData, poolVO);
+            if (srcData instanceof SnapshotObject) {
+                ProviderSnapshot snapshot = api.getSnapshot(context, srcDataObject);
+                return snapshot != null;
+            } else {
+                ProviderVolume vol = api.getVolume(context, srcDataObject);
+                return vol != null;
+            }
+        } catch (Throwable e) {
+            s_logger.warn("Problem checking if we canCopy", e);
+            return false;
+        }
+    }
+
+    @Override
+    public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {

Review Comment:
   In the case of raw over fiber channel, we did not find this to be the case. The guest OS sees changes made to the raw volume on the storage array immediately.
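   To make that concrete, here is a rough sketch of the resize path this implies. It is illustrative only, not the code in this PR: `api.resize(...)` is a hypothetical stand-in for whatever grow operation the ProviderAdapter contract defines (the excerpt above cuts off before the method body), and the payload handling mirrors other CloudStack managed-storage drivers. Because the raw device grows in place on the array and the guest sees the new size immediately, the driver only needs to resize on the provider side and update CloudStack's bookkeeping, with no hypervisor-side notification step.

   ```java
   // Illustrative sketch only -- not the PR's implementation. Assumes the ProviderAdapter
   // exposes a resize(context, dataObject, newSizeInBytes) operation, and that the volume's
   // payload carries a ResizeVolumePayload as in other CloudStack storage drivers.
   @Override
   public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
       CreateCmdResult result = null;
       try {
           VolumeObject vol = (VolumeObject) data;
           ResizeVolumePayload payload = (ResizeVolumePayload) vol.getpayload();

           StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId());
           Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
           ProviderAdapter api = getAPI(storagePool, details);

           ProviderAdapterContext context = newManagedVolumeContext(data);
           ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool);

           // grow the volume on the array only; no hypervisor/guest notification step is
           // needed because the guest OS observes the raw FC device's new size directly
           api.resize(context, dataIn, payload.newSize);

           result = new CreateCmdResult(data.getUuid(), new Answer(null));
           result.setSuccess(true);
       } catch (Throwable e) {
           result = new CreateCmdResult(null, new Answer(null));
           result.setResult(e.toString());
           result.setSuccess(false);
       } finally {
           if (callback != null) {
               callback.complete(result);
           }
       }
   }
   ```

   The actual implementation should of course follow whatever resize contract ProviderAdapter really defines; the sketch is only meant to show why no extra guest-visible step is required for raw volumes over fiber channel.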
