The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6946

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===

From 5f7dd11bb4a8d59ec3601992f6db2cb78f756170 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 10:07:37 +0000
Subject: [PATCH 1/9] lxd/main/init: Removes legacy storage drivers from
 availableStorageDrivers

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/main_init.go | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/lxd/main_init.go b/lxd/main_init.go
index 00a4a574a5..d757277926 100644
--- a/lxd/main_init.go
+++ b/lxd/main_init.go
@@ -198,18 +198,11 @@ func (c *cmdInit) availableStorageDrivers(poolType string) []string {
                        continue
                }
 
-               // Check if available as a new style driver.
+               // Check if available as a driver.
                if shared.StringInSlice(driver, availableDrivers) {
                        drivers = append(drivers, driver)
                        continue
                }
-
-               // Check if available as an old style driver.
-               _, err := storageCoreInit(driver)
-               if err == nil {
-                       drivers = append(drivers, driver)
-                       continue
-               }
        }
 
        return drivers
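For readers skimming the diff: the only availability check that survives is plain slice membership. Below is a minimal, self-contained sketch of the same idiom, with the helper reimplemented locally for illustration (LXD's real helper is shared.StringInSlice):

package main

import "fmt"

// stringInSlice mirrors the behaviour of shared.StringInSlice, which is
// now the only availability check left in availableStorageDrivers.
func stringInSlice(key string, list []string) bool {
	for _, entry := range list {
		if entry == key {
			return true
		}
	}
	return false
}

func main() {
	availableDrivers := []string{"dir", "btrfs", "lvm", "zfs", "ceph"}
	supportedDrivers := []string{"zfs", "mock", "dir"}

	drivers := []string{}
	for _, driver := range supportedDrivers {
		// Check if available as a driver.
		if stringInSlice(driver, availableDrivers) {
			drivers = append(drivers, driver)
		}
	}

	fmt.Println(drivers) // Prints: [zfs dir]
}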

From 8c3059aa6e3d7e97e6aba265d9dc265e33224900 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 10:08:25 +0000
Subject: [PATCH 2/9] lxd/patches: Updates patchStorageApiPermissions to use
 new storage drivers

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/patches.go | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/lxd/patches.go b/lxd/patches.go
index 3c2c44c73a..ec51b9f995 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -3023,22 +3023,31 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
                }
 
                for _, vol := range volumes {
-                       volStruct, err := storagePoolVolumeInit(d.State(), "default", poolName, vol, storagePoolVolumeTypeCustom)
+                       pool, err := storagePools.GetPoolByName(d.State(), poolName)
                        if err != nil {
                                return err
                        }
 
-                       ourMount, err := volStruct.StoragePoolVolumeMount()
-                       if err != nil {
-                               return err
-                       }
-                       if ourMount {
-                               defer volStruct.StoragePoolVolumeUmount()
-                       }
+                       // Run task in anonymous function so as not to stack up defers.
+                       err = func() error {
+                               ourMount, err := pool.MountCustomVolume(vol, nil)
+                               if err != nil {
+                                       return err
+                               }
 
-                       cuMntPoint := driver.GetStoragePoolVolumeMountPoint(poolName, vol)
-                       err = os.Chmod(cuMntPoint, 0711)
-                       if err != nil && !os.IsNotExist(err) {
+                               if ourMount {
+                                       defer pool.UnmountCustomVolume(vol, nil)
+                               }
+
+                               cuMntPoint := driver.GetStoragePoolVolumeMountPoint(poolName, vol)
+                               err = os.Chmod(cuMntPoint, 0711)
+                               if err != nil && !os.IsNotExist(err) {
+                                       return err
+                               }
+
+                               return nil
+                       }()
+                       if err != nil {
                                return err
                        }
                }
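The anonymous function introduced here is the standard Go remedy for defers accumulating inside a loop: each iteration's unmount now runs when that iteration ends, not when the whole patch function returns. A runnable sketch of the idiom, using generic file handling rather than LXD's storage API:

package main

import (
	"fmt"
	"os"
)

// processAll mirrors the shape of the patched loop in
// patchStorageApiPermissions: per-iteration cleanup is scoped to an
// anonymous function so defers fire each iteration instead of piling
// up until the surrounding function returns.
func processAll(paths []string) error {
	for _, p := range paths {
		// Run task in anonymous function so as not to stack up defers.
		err := func() error {
			f, err := os.Open(p)
			if err != nil {
				return err
			}
			defer f.Close() // Runs at the end of this iteration.

			fmt.Println("processing", f.Name())
			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}

func main() {
	if err := processAll([]string{"/etc/hosts"}); err != nil {
		fmt.Println("error:", err)
	}
}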

From beca714e331b8d73c5703a7bd31a647e1ff2f546 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 10:08:57 +0000
Subject: [PATCH 3/9] lxd/storage: Removes storageCoreInit function

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage.go | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 223064ed1e..c28012c155 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -223,32 +223,6 @@ type storage interface {
        StorageMigrationSink(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error
 }
 
-func storageCoreInit(driver string) (storage, error) {
-       sType, err := storageStringToType(driver)
-       if err != nil {
-               return nil, err
-       }
-
-       switch sType {
-       case storageTypeCeph:
-               ceph := storageCeph{}
-               err = ceph.StorageCoreInit()
-               if err != nil {
-                       return nil, err
-               }
-               return &ceph, nil
-       case storageTypeMock:
-               mock := storageMock{}
-               err = mock.StorageCoreInit()
-               if err != nil {
-                       return nil, err
-               }
-               return &mock, nil
-       }
-
-       return nil, fmt.Errorf("invalid storage type")
-}
-
 func storageInit(s *state.State, project, poolName, volumeName string, volumeType int) (storage, error) {
        // Load the storage pool.
        poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
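With storageCoreInit gone, driver construction no longer goes through a hand-written switch on the type constant. As a generic illustration of the direction this kind of cleanup takes (a registry map instead of a switch), under invented names that are not LXD's:

package main

import (
	"errors"
	"fmt"
)

type driver interface {
	Version() string
}

type cephDriver struct{}

func (cephDriver) Version() string { return "ceph 12.2" }

// registry replaces a hand-maintained switch: adding a driver is one
// map entry rather than another case block.
var registry = map[string]func() driver{
	"ceph": func() driver { return cephDriver{} },
}

func load(name string) (driver, error) {
	ctor, ok := registry[name]
	if !ok {
		return nil, errors.New("invalid storage type")
	}
	return ctor(), nil
}

func main() {
	d, err := load("ceph")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(d.Version())
}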

From 571ec26c841764de76b821c9a5e2bcb413dec78d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 10:09:14 +0000
Subject: [PATCH 4/9] lxd/storage: Removes legacy drivers from
 storagePoolDriversCacheUpdate

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage.go | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index c28012c155..03397c3931 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -644,18 +644,6 @@ func storagePoolDriversCacheUpdate(s *state.State) {
                }
        }
 
-       // Handle legacy backends.
-       for _, driver := range drivers {
-               // Initialize a core storage interface for the given driver.
-               sCore, err := storageCoreInit(driver)
-               if err != nil {
-                       continue
-               }
-
-               // Grab the version.
-               data[driver] = sCore.GetStorageTypeVersion()
-       }
-
        // Prepare the cache entries.
        backends := []string{}
        for k, v := range data {
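With the legacy loop removed, the cache entries come solely from the data map populated by the new drivers. A toy sketch of assembling such "driver version" pairs into a single backends string; the map contents and the " | " separator are assumptions for illustration, not LXD's exact output:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical driver -> version data, as collected from the new
	// storage drivers only.
	data := map[string]string{
		"zfs":   "0.8.3",
		"btrfs": "5.4.1",
	}

	// Prepare the cache entries.
	backends := []string{}
	for k, v := range data {
		backends = append(backends, fmt.Sprintf("%s %s", k, v))
	}
	sort.Strings(backends) // Deterministic output for the example.

	fmt.Println(strings.Join(backends, " | "))
}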

From 1f5ebdc51ffc4058e522e03b8c142afca528a109 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 10:39:51 +0000
Subject: [PATCH 5/9] lxd/patches: Removes old storage layer from
 upgradeFromStorageTypeLvm

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/patches.go | 48 +++++++++++++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 21 deletions(-)

diff --git a/lxd/patches.go b/lxd/patches.go
index ec51b9f995..c69503407d 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -1210,44 +1210,50 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
                                // means that this was a mixed-storage LXD
                                // instance.
 
-                               // Initialize storage interface for the new
-                               // container.
-                               ctStorage, err := storagePoolVolumeContainerLoadInit(d.State(), "default", ct)
+                               // Load the container from the database.
+                               ctStruct, err := instance.LoadByProjectAndName(d.State(), "default", ct)
                                if err != nil {
-                                       logger.Errorf("Failed to initialize new storage interface for LVM container %s: %s", ct, err)
+                                       logger.Errorf("Failed to load LVM container %s: %s", ct, err)
                                        return err
                                }
 
-                               // Load the container from the database.
-                               ctStruct, err := instance.LoadByProjectAndName(d.State(), "default", ct)
+                               pool, err := storagePools.GetPoolByInstance(d.State(), ctStruct)
                                if err != nil {
-                                       logger.Errorf("Failed to load LVM container %s: %s", ct, err)
                                        return err
                                }
 
-                               // Create an empty LVM logical volume for the
-                               // container.
-                               err = ctStorage.ContainerCreate(ctStruct)
+                               // Create an empty LVM logical volume for the container.
+                               err = pool.CreateInstance(ctStruct, nil)
                                if err != nil {
                                        logger.Errorf("Failed to create empty LVM logical volume for container %s: %s", ct, err)
                                        return err
                                }
 
-                               // In case the new LVM logical volume for the
-                               // container is not mounted mount it.
-                               if !shared.IsMountPoint(newContainerMntPoint) {
-                                       _, err = ctStorage.ContainerMount(ctStruct)
+                               err = func() error {
+                                       // In case the new LVM logical volume for the container is not mounted mount it.
+                                       if !shared.IsMountPoint(newContainerMntPoint) {
+                                               ourMount, err := pool.MountInstance(ctStruct, nil)
+                                               if err != nil {
+                                                       logger.Errorf("Failed to mount new empty LVM logical volume for container %s: %s", ct, err)
+                                                       return err
+                                               }
+
+                                               if ourMount {
+                                                       defer pool.UnmountInstance(ctStruct, nil)
+                                               }
+                                       }
+
+                                       // Use rsync to fill the empty volume.
+                                       output, err := rsync.LocalCopy(oldContainerMntPoint, newContainerMntPoint, "", true)
                                        if err != nil {
-                                               logger.Errorf("Failed to mount new empty LVM logical volume for container %s: %s", ct, err)
-                                               return err
+                                               pool.DeleteInstance(ctStruct, nil)
+                                               return fmt.Errorf("rsync failed: %s", string(output))
                                        }
-                               }
 
-                               // Use rsync to fill the empty volume.
-                               output, err := rsync.LocalCopy(oldContainerMntPoint, newContainerMntPoint, "", true)
+                                       return nil
+                               }()
                                if err != nil {
-                                       ctStorage.ContainerDelete(ctStruct)
-                                       return fmt.Errorf("rsync failed: %s", string(output))
+                                       return err
+                                       return err
                                }
 
                                // Remove the old container.

From fba757123b4c94d49f1cc5443ee2bf95e892eeab Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 11:47:30 +0000
Subject: [PATCH 6/9] lxd/container/lxc: Removes some calls to the old storage
 layer

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/container_lxc.go | 48 +++++++++-----------------------------------
 1 file changed, 9 insertions(+), 39 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index e2827b564e..d538adf7cc 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3272,22 +3272,12 @@ func (c *containerLXC) Restore(sourceContainer instance.Instance, stateful bool)
                }
 
                // Ensure that storage is mounted for state path checks and for backup.yaml updates.
-               if pool != nil {
-                       ourStart, err := pool.MountInstance(c, nil)
-                       if err != nil {
-                               return err
-                       }
-                       if ourStart {
-                               defer pool.UnmountInstance(c, nil)
-                       }
-               } else {
-                       ourStart, err := c.mount()
-                       if err != nil {
-                               return err
-                       }
-                       if ourStart {
-                               defer c.unmount()
-                       }
+               ourStart, err := pool.MountInstance(c, nil)
+               if err != nil {
+                       return err
+               }
+               if ourStart {
+                       defer pool.UnmountInstance(c, nil)
                }
        }
 
@@ -3302,17 +3292,9 @@ func (c *containerLXC) Restore(sourceContainer instance.Instance, stateful bool)
        logger.Info("Restoring container", ctxMap)
 
        // Restore the rootfs.
-       if pool != nil {
-               err = pool.RestoreInstanceSnapshot(c, sourceContainer, nil)
-               if err != nil {
-                       return err
-               }
-       } else {
-               err = c.storage.ContainerRestore(c, sourceContainer)
-               if err != nil {
-                       logger.Error("Failed restoring container filesystem", 
ctxMap)
-                       return err
-               }
+       err = pool.RestoreInstanceSnapshot(c, sourceContainer, nil)
+       if err != nil {
+               return err
        }
 
        // Restore the configuration.
@@ -4803,12 +4785,6 @@ func (c *containerLXC) Migrate(args *CriuMigrationArgs) error {
 
        logger.Info("Migrating container", ctxMap)
 
-       // Initialize storage interface for the container.
-       err = c.initStorage()
-       if err != nil {
-               return err
-       }
-
        prettyCmd := ""
        switch args.cmd {
        case lxc.MIGRATE_PRE_DUMP:
@@ -5667,12 +5643,6 @@ func (c *containerLXC) cpuState() api.InstanceStateCPU {
 func (c *containerLXC) diskState() map[string]api.InstanceStateDisk {
        disk := map[string]api.InstanceStateDisk{}
 
-       // Initialize storage interface for the container.
-       err := c.initStorage()
-       if err != nil {
-               return disk
-       }
-
        for _, dev := range c.expandedDevices.Sorted() {
                if dev.Config["type"] != "disk" {
                        continue
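The surviving branch in Restore relies on LXD's usual mount-reference idiom: the returned boolean reports whether this caller actually performed the mount, and only then is the matching unmount deferred. A schematic sketch against a hypothetical Pool interface (not the real storagePools types, which take an instance and operation rather than a name):

package main

import "fmt"

// Pool is a hypothetical stand-in for the storage pool interface used
// above; it exists only to demonstrate the ourStart/defer pattern.
type Pool interface {
	Mount(name string) (bool, error)
	Unmount(name string) (bool, error)
}

type fakePool struct{ mounted map[string]bool }

func (p *fakePool) Mount(name string) (bool, error) {
	if p.mounted[name] {
		return false, nil // Already mounted by someone else.
	}
	p.mounted[name] = true
	return true, nil // We performed the mount.
}

func (p *fakePool) Unmount(name string) (bool, error) {
	delete(p.mounted, name)
	return true, nil
}

func restore(p Pool, name string) error {
	ourStart, err := p.Mount(name)
	if err != nil {
		return err
	}
	if ourStart {
		// Only undo a mount we created; leave pre-existing mounts alone.
		defer p.Unmount(name)
	}

	fmt.Println("restoring", name)
	return nil
}

func main() {
	p := &fakePool{mounted: map[string]bool{}}
	restore(p, "c1")
}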

From 8f4795bef87e0de5b32c8d83299c83feb9942750 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 11:47:54 +0000
Subject: [PATCH 7/9] lxd/migrate/container: Removes calls to old storage layer

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_container.go | 114 +++++++++------------------------------
 1 file changed, 25 insertions(+), 89 deletions(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index f242b42eb5..70751a70a7 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -432,8 +432,6 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
                return err
        }
 
-       var legacyDriver MigrationStorageSourceDriver
-       var legacyCleanup func()            // Called after migration, to remove any temporary snapshots, etc.
        var migrationTypes []migration.Type // Negotiated migration types.
        var rsyncBwlimit string             // Used for CRIU state and legacy storage rsync transfers.
 
@@ -454,10 +452,6 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
        // the purpose of using defer. An abort function reduces the odds of mishandling errors
        // without introducing the fragility of closing on err.
        abort := func(err error) error {
-               if legacyCleanup != nil {
-                       legacyCleanup()
-               }
-
                go s.sendControl(err)
                return err
        }
@@ -469,71 +463,27 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
        // Indicate this info to the storage driver so that it can alter its behaviour if needed.
        volSourceArgs.MultiSync = s.live || (respHeader.Criu != nil && *respHeader.Criu == migration.CRIUType_NONE)
 
-       if pool != nil {
-               rsyncBwlimit = pool.Driver().Config()["rsync.bwlimit"]
-               migrationTypes, err = migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
-               if err != nil {
-                       logger.Errorf("Failed to negotiate migration type: %v", err)
-                       return abort(err)
-               }
-
-               sendSnapshotNames := snapshotNames
-
-               // If we are in refresh mode, only send the snapshots the target has asked for.
-               if respHeader.GetRefresh() {
-                       sendSnapshotNames = respHeader.GetSnapshotNames()
-               }
-
-               volSourceArgs.Name = s.instance.Name()
-               volSourceArgs.MigrationType = migrationTypes[0]
-               volSourceArgs.Snapshots = sendSnapshotNames
-               volSourceArgs.TrackProgress = true
-               err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
-               if err != nil {
-                       return abort(err)
-               }
-       } else {
-               // Handle zfs options.
-               zfsFeatures := respHeader.GetZfsFeaturesSlice()
-
-               // Set source args.
-               sourceArgs := MigrationSourceArgs{
-                       Instance:      s.instance,
-                       InstanceOnly:  s.instanceOnly,
-                       RsyncFeatures: rsyncFeatures,
-                       ZfsFeatures:   zfsFeatures,
-               }
-
-               // Initialize storage driver.
-               legacyDriver, err = ct.Storage().MigrationSource(sourceArgs)
-               if err != nil {
-                       return abort(err)
-               }
-               legacyCleanup = legacyDriver.Cleanup
+       rsyncBwlimit = pool.Driver().Config()["rsync.bwlimit"]
+       migrationTypes, err = migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
+       if err != nil {
+               logger.Errorf("Failed to negotiate migration type: %v", err)
+               return abort(err)
+       }
 
-               if respHeader.GetRefresh() || *offerHeader.Fs != *respHeader.Fs {
-                       myType := migration.MigrationFSType_RSYNC
-                       respHeader.Fs = &myType
+       sendSnapshotNames := snapshotNames
 
-                       if respHeader.GetRefresh() {
-                               legacyDriver, _ = rsyncRefreshSource(respHeader.GetSnapshotNames(), sourceArgs)
-                       } else {
-                               legacyDriver, _ = rsyncMigrationSource(sourceArgs)
-                       }
-
-                       // Check if this storage pool has a rate limit set for rsync.
-                       poolwritable := ct.Storage().GetStoragePoolWritable()
-                       if poolwritable.Config != nil {
-                               rsyncBwlimit = poolwritable.Config["rsync.bwlimit"]
-                       }
-               }
+       // If we are in refresh mode, only send the snapshots the target has asked for.
+       if respHeader.GetRefresh() {
+               sendSnapshotNames = respHeader.GetSnapshotNames()
+       }
 
-               logger.Debugf("SendWhileRunning starting")
-               err = legacyDriver.SendWhileRunning(s.fsConn, migrateOp, rsyncBwlimit, s.instanceOnly)
-               if err != nil {
-                       return abort(err)
-               }
-               logger.Debugf("SendWhileRunning finished")
+       volSourceArgs.Name = s.instance.Name()
+       volSourceArgs.MigrationType = migrationTypes[0]
+       volSourceArgs.Snapshots = sendSnapshotNames
+       volSourceArgs.TrackProgress = true
+       err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
+       if err != nil {
+               return abort(err)
        }
 
        restoreSuccess := make(chan bool, 1)
@@ -716,31 +666,17 @@ func (s *migrationSourceWs) Do(state *state.State, migrateOp *operations.Operati
 
        // Perform final sync if in multi sync mode.
        if volSourceArgs.MultiSync {
-               if pool != nil {
-                       // Indicate to the storage driver we are doing final sync and because of this don't send
-                       // snapshots as they don't need to have a final sync as not being modified.
-                       volSourceArgs.FinalSync = true
-                       volSourceArgs.Snapshots = nil
+               // Indicate to the storage driver we are doing final sync and because of this don't send
+               // snapshots as they don't need to have a final sync as not being modified.
+               volSourceArgs.FinalSync = true
+               volSourceArgs.Snapshots = nil
 
-                       err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
-                       if err != nil {
-                               return abort(err)
-                       }
-               } else {
-                       logger.Debugf("SendAfterCheckpoint starting")
-                       err = legacyDriver.SendAfterCheckpoint(s.fsConn, rsyncBwlimit)
-                       if err != nil {
-                               return abort(err)
-                       }
-                       logger.Debugf("SendAfterCheckpoint finished")
+               err = pool.MigrateInstance(s.instance, &shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
+               if err != nil {
+                       return abort(err)
                }
        }
 
-       // Perform any storage level cleanup, such as removing any temporary snapshots.
-       if legacyCleanup != nil {
-               legacyCleanup()
-       }
-
        msg := migration.MigrationControl{}
        err = s.recv(&msg)
        if err != nil {
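With the legacy path gone, every transfer goes through the negotiated-type flow: intersect what the source offers with what the target supports, then drive the pool's MigrateInstance with the first match. A toy version of that intersection step, using plain strings instead of the protobuf migration types migration.MatchTypes actually works with:

package main

import (
	"errors"
	"fmt"
)

// matchTypes returns the offered types the responder also supports, in
// offer order, echoing the shape of what migration.MatchTypes negotiates.
func matchTypes(offered, supported []string) ([]string, error) {
	matched := []string{}
	for _, o := range offered {
		for _, s := range supported {
			if o == s {
				matched = append(matched, o)
				break
			}
		}
	}
	if len(matched) == 0 {
		return nil, errors.New("no common migration type")
	}
	return matched, nil
}

func main() {
	offered := []string{"zfs", "rsync"} // Source pool's preference order.
	supported := []string{"rsync"}      // Target's response header.

	types, err := matchTypes(offered, supported)
	if err != nil {
		fmt.Println("negotiation failed:", err)
		return
	}
	fmt.Println("using:", types[0]) // using: rsync
}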

From 0bffb5e91b3a5bb7ee01adc239b517c737ed4e89 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 11:48:12 +0000
Subject: [PATCH 8/9] lxd/migrate/storage/volumes: Removes calls to old storage
 layer

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_storage_volumes.go | 85 +++++++---------------------------
 1 file changed, 16 insertions(+), 69 deletions(-)

diff --git a/lxd/migrate_storage_volumes.go b/lxd/migrate_storage_volumes.go
index fdf17d7f1b..66a959dd85 100644
--- a/lxd/migrate_storage_volumes.go
+++ b/lxd/migrate_storage_volumes.go
@@ -108,77 +108,24 @@ func (s *migrationSourceWs) DoStorage(state *state.State, poolName string, volNa
        }
 
        // Use new storage layer for migration if supported.
-       if pool != nil {
-               migrationTypes, err := migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
-               if err != nil {
-                       logger.Errorf("Failed to negotiate migration type: %v", err)
-                       s.sendControl(err)
-                       return err
-               }
-
-               volSourceArgs := &migration.VolumeSourceArgs{
-                       Name:          volName,
-                       MigrationType: migrationTypes[0],
-                       Snapshots:     snapshotNames,
-                       TrackProgress: true,
-               }
-
-               err = pool.MigrateCustomVolume(&shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
-               if err != nil {
-                       go s.sendControl(err)
-                       return err
-               }
-       } else {
-               // Use legacy storage layer for migration.
-
-               // Get target's rsync options.
-               rsyncFeatures := respHeader.GetRsyncFeaturesSlice()
-               if !shared.StringInSlice("bidirectional", rsyncFeatures) {
-                       // If no bi-directional support, assume LXD 3.7 level
-                       // NOTE: Do NOT extend this list of arguments
-                       rsyncFeatures = []string{"xattrs", "delete", "compress"}
-               }
-
-               // Get target's zfs options.
-               zfsFeatures := respHeader.GetZfsFeaturesSlice()
-
-               // Set source args
-               sourceArgs := MigrationSourceArgs{
-                       RsyncFeatures: rsyncFeatures,
-                       ZfsFeatures:   zfsFeatures,
-                       VolumeOnly:    s.volumeOnly,
-               }
-
-               driver, fsErr := s.storage.StorageMigrationSource(sourceArgs)
-               if fsErr != nil {
-                       logger.Errorf("Failed to initialize new storage volume migration driver")
-                       s.sendControl(fsErr)
-                       return fsErr
-               }
-
-               bwlimit := ""
-
-               if *offerHeader.Fs != *respHeader.Fs {
-                       driver, _ = rsyncStorageMigrationSource(sourceArgs)
-
-                       // Check if this storage pool has a rate limit set for rsync.
-                       poolwritable := s.storage.GetStoragePoolWritable()
-                       if poolwritable.Config != nil {
-                               bwlimit = poolwritable.Config["rsync.bwlimit"]
-                       }
-               }
+       migrationTypes, err := migration.MatchTypes(respHeader, migration.MigrationFSType_RSYNC, poolMigrationTypes)
+       if err != nil {
+               logger.Errorf("Failed to negotiate migration type: %v", err)
+               s.sendControl(err)
+               return err
+       }
 
-               abort := func(err error) error {
-                       driver.Cleanup()
-                       go s.sendControl(err)
-                       return err
-               }
+       volSourceArgs := &migration.VolumeSourceArgs{
+               Name:          volName,
+               MigrationType: migrationTypes[0],
+               Snapshots:     snapshotNames,
+               TrackProgress: true,
+       }
 
-               err = driver.SendStorageVolume(s.fsConn, migrateOp, bwlimit, s.storage, s.volumeOnly)
-               if err != nil {
-                       logger.Errorf("Failed to send storage volume")
-                       return abort(err)
-               }
+       err = pool.MigrateCustomVolume(&shared.WebsocketIO{Conn: s.fsConn}, volSourceArgs, migrateOp)
+       if err != nil {
+               go s.sendControl(err)
+               return err
        }
 
        msg := migration.MigrationControl{}

From 55f2b700c9a4692407d5b4d4955a5a130e6e663f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 27 Feb 2020 11:48:39 +0000
Subject: [PATCH 9/9] lxd/patches: Switches upgradeFromStorageTypeLvm to use
 new storage layer

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/patches.go | 68 +++++++++++++++++++++++++++++---------------------
 1 file changed, 40 insertions(+), 28 deletions(-)

diff --git a/lxd/patches.go b/lxd/patches.go
index c69503407d..bd13a029eb 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -1369,14 +1369,6 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
                                        // and it means that this was a
                                        // mixed-storage LXD instance.
 
-                                       // Initialize storage interface for the new
-                                       // snapshot.
-                                       csStorage, err := storagePoolVolumeContainerLoadInit(d.State(), "default", cs)
-                                       if err != nil {
-                                               logger.Errorf("Failed to initialize new storage interface for LVM container %s: %s", cs, err)
-                                               return err
-                                       }
-
                                        // Load the snapshot from the database.
                                        csStruct, err := instance.LoadByProjectAndName(d.State(), "default", cs)
                                        if err != nil {
@@ -1384,36 +1376,56 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
                                                return err
                                        }
 
-                                       // Create an empty LVM logical volume
-                                       // for the snapshot.
-                                       err = csStorage.ContainerSnapshotCreateEmpty(csStruct)
+                                       pool, err := storagePools.GetPoolByInstance(d.State(), csStruct)
                                        if err != nil {
-                                               logger.Errorf("Failed to create empty LVM logical volume for container %s: %s", cs, err)
                                                return err
                                        }
 
-                                       // In case the new LVM logical volume
-                                       // for the snapshot is not mounted mount
-                                       // it.
-                                       if !shared.IsMountPoint(newSnapshotMntPoint) {
-                                               _, err = csStorage.ContainerMount(csStruct)
-                                               if err != nil {
-                                                       logger.Errorf("Failed to mount new empty LVM logical volume for container %s: %s", cs, err)
-                                                       return err
-                                               }
+                                       parent, _, _ := shared.InstanceGetParentAndSnapshotName(csStruct.Name())
+                                       parentInst, err := instance.LoadByProjectAndName(d.State(), csStruct.Project(), parent)
+                                       if err != nil {
+                                               logger.Errorf("Failed to load parent LVM container %s: %s", cs, err)
+                                               return err
                                        }
 
-                                       // Use rsync to fill the empty volume.
-                                       output, err := rsync.LocalCopy(oldSnapshotMntPoint, newSnapshotMntPoint, "", true)
+                                       // Create an empty LVM logical volume for the snapshot.
+                                       err = pool.CreateInstanceSnapshot(csStruct, parentInst, nil)
                                        if err != nil {
-                                               csStorage.ContainerDelete(csStruct)
-                                               return fmt.Errorf("rsync failed: %s", string(output))
+                                               logger.Errorf("Failed to create LVM logical volume snapshot for container %s: %s", cs, err)
+                                               return err
                                        }
 
-                                       // Remove the old snapshot.
-                                       err = os.RemoveAll(oldSnapshotMntPoint)
+                                       err = func() error {
+                                               // In case the new LVM logical volume for the snapshot is not mounted mount it.
+                                               if !shared.IsMountPoint(newSnapshotMntPoint) {
+                                                       ourMount, err := pool.MountInstanceSnapshot(csStruct, nil)
+                                                       if err != nil {
+                                                               logger.Errorf("Failed to mount new empty LVM logical volume for container %s: %s", cs, err)
+                                                               return err
+                                                       }
+
+                                                       if ourMount {
+                                                               defer pool.UnmountInstanceSnapshot(csStruct, nil)
+                                                       }
+                                               }
+
+                                               // Use rsync to fill the snapshot volume.
+                                               output, err := rsync.LocalCopy(oldSnapshotMntPoint, newSnapshotMntPoint, "", true)
+                                               if err != nil {
+                                                       pool.DeleteInstanceSnapshot(csStruct, nil)
+                                                       return fmt.Errorf("rsync failed: %s", string(output))
+                                               }
+
+                                               // Remove the old snapshot.
+                                               err = os.RemoveAll(oldSnapshotMntPoint)
+                                               if err != nil {
+                                                       logger.Errorf("Failed to remove old container %s: %s", oldSnapshotMntPoint, err)
+                                                       return err
+                                               }
+
+                                               return nil
+                                       }()
                                        if err != nil {
-                                               logger.Errorf("Failed to remove old container %s: %s", oldSnapshotMntPoint, err)
                                                return err
                                        }
                                }
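Patch 9 needs the parent instance before it can create the snapshot volume, so it first splits the snapshot's composite name. LXD snapshot names take a parent/snapshot form; below is a small sketch of the kind of split shared.InstanceGetParentAndSnapshotName performs, reimplemented here for illustration:

package main

import (
	"fmt"
	"strings"
)

// parentAndSnapshotName splits an instance name of the form
// "parent/snapshot" into its parts, mirroring what
// shared.InstanceGetParentAndSnapshotName is used for in patch 9.
func parentAndSnapshotName(name string) (parent string, snap string, isSnap bool) {
	fields := strings.SplitN(name, "/", 2)
	if len(fields) == 1 {
		return name, "", false // Not a snapshot name.
	}
	return fields[0], fields[1], true
}

func main() {
	parent, snap, ok := parentAndSnapshotName("c1/snap0")
	fmt.Println(parent, snap, ok) // c1 snap0 true
}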