The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6535

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Currently this is a WIP, but I wanted to check it against Jenkins' storage engines.
From 93a308b3f4c462ab6ec544d08e57d6b9da045644 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 29 Nov 2019 17:32:54 +0000
Subject: [PATCH 1/4] lxd/storage/backend/lxd: Fixes comments

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/backend_lxd.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index b3578033fc..4aade0a6c1 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -533,7 +533,7 @@ func (b *lxdBackend) CreateInstanceFromCopy(inst instance.Instance, src instance
                                Name:          inst.Name(),
                                Snapshots:     snapshotNames,
                                MigrationType: migrationType,
-                               TrackProgress: false, // Do not a progress tracker on receiver.
+                               TrackProgress: false, // Do not use a progress tracker on receiver.
                        }, op)
 
                        bEndErrCh <- err
@@ -675,7 +675,7 @@ func (b *lxdBackend) RefreshInstance(inst instance.Instance, src instance.Instan
                                Snapshots:     snapshotNames,
                                MigrationType: migrationType,
                                Refresh:       true,  // Indicate to receiver volume should exist.
-                               TrackProgress: false, // Do not a progress tracker on receiver.
+                               TrackProgress: false, // Do not use a progress tracker on receiver.
                        }, op)
 
                        bEndErrCh <- err
@@ -1686,7 +1686,7 @@ func (b *lxdBackend) CreateCustomVolumeFromCopy(volName, desc string, config map
                        Config:        config,
                        Snapshots:     snapshotNames,
                        MigrationType: migrationType,
-                       TrackProgress: false, // Do not a progress tracker on receiver.
+                       TrackProgress: false, // Do not use a progress tracker on receiver.
 
                }, op)
 

From b48f2271cfba2676bf3cb1b22b0e318d28d25854 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 2 Dec 2019 13:45:34 +0000
Subject: [PATCH 2/4] lxd/storage/backend/lxd: Adds symlink and revert support
 to CreateInstanceFromMigration

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/backend_lxd.go | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 4aade0a6c1..2bb15b0fb4 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -835,12 +835,36 @@ func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io
        volStorageName := project.Prefix(inst.Project(), args.Name)
 
        vol := b.newVolume(volType, contentType, volStorageName, args.Config)
+
+       revert := true
+       if !args.Refresh {
+               defer func() {
+                       if !revert {
+                               return
+                       }
+                       b.DeleteInstance(inst, op)
+               }()
+       }
+
        err = b.driver.CreateVolumeFromMigration(vol, conn, args, op)
        if err != nil {
                conn.Close()
                return err
        }
 
+       err = b.ensureInstanceSymlink(inst.Type(), inst.Project(), inst.Name(), vol.MountPath())
+       if err != nil {
+               return err
+       }
+
+       if len(args.Snapshots) > 0 {
+               err = b.ensureInstanceSnapshotSymlink(inst.Type(), inst.Project(), inst.Name())
+               if err != nil {
+                       return err
+               }
+       }
+
+       revert = false
        return nil
 }
 

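The defer-based revert in this patch is a common Go idiom: arm a cleanup
before doing multi-step work, and disarm it only once every step has
succeeded. A minimal, standalone sketch of the same idea (the names here are
illustrative, not LXD's real API):

package main

import (
    "errors"
    "fmt"
)

// createVolume arms a deferred revert before doing multi-step work, then
// disarms it on success, mirroring CreateInstanceFromMigration above.
func createVolume(fail bool) error {
    fmt.Println("volume created")

    revert := true
    defer func() {
        if !revert {
            return
        }
        fmt.Println("reverting: volume deleted") // runs on any early return
    }()

    if fail {
        return errors.New("symlink creation failed") // deferred revert fires
    }

    revert = false // success: disarm the revert
    return nil
}

func main() {
    createVolume(true)  // volume created, then reverted
    createVolume(false) // volume created, revert disarmed
}

Note that in the patch the deferred DeleteInstance is only armed when
args.Refresh is false, so a failed refresh does not delete a pre-existing
instance.
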
From 59d7c3840f47a65629de07655002e9ccb4c1c0b8 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 2 Dec 2019 13:46:40 +0000
Subject: [PATCH 3/4] lxd/migrate/container: Links migrationSink.Do to new
 storage pkg

- Moves instance snapshot DB record creation into the generic migration layer
and out of the storage layer.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/migrate_container.go | 79 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 78 insertions(+), 1 deletion(-)

diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 6ada2233a0..bd2597999e 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -22,6 +22,8 @@ import (
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/rsync"
        "github.com/lxc/lxd/lxd/state"
+       storagePools "github.com/lxc/lxd/lxd/storage"
+       storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
        "github.com/lxc/lxd/lxd/util"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/api"
@@ -861,7 +863,82 @@ func (c *migrationSink) Do(state *state.State, migrateOp *operations.Operation)
        // The migration header to be sent back to source with our target options.
        var respHeader migration.MigrationHeader
 
-       if c.src.instance.Type() == instancetype.Container {
+       pool, err := storagePools.GetPoolByInstance(state, c.src.instance)
+       if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
+               if err != nil {
+                       return err
+               }
+
+               // Extract the source's migration type and then match it against our pool's
+               // supported types and features. If a match is found the combined features list
+               // will be sent back to the requester.
+               respType, err := migration.MatchTypes(offerHeader, migration.MigrationFSType_RSYNC, pool.MigrationTypes(storagePools.InstanceContentType(c.src.instance)))
+               if err != nil {
+                       return err
+               }
+
+               // Convert response type to response header and copy snapshot info into it.
+               respHeader = migration.TypesToHeader(respType)
+               respHeader.SnapshotNames = offerHeader.SnapshotNames
+               respHeader.Snapshots = offerHeader.Snapshots
+
+               // Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
+               // with the new storage layer.
+               myTarget = func(conn *websocket.Conn, op *operations.Operation, args MigrationSinkArgs) error {
+                       volTargetArgs := migration.VolumeTargetArgs{
+                               Name:          args.Instance.Name(),
+                               MigrationType: respType,
+                               Refresh:       args.Refresh, // Indicate to receiver volume should exist.
+                               TrackProgress: false,        // Do not use a progress tracker on receiver.
+                       }
+
+                       // At this point we have already figured out the parent container's root
+                       // disk device so we can simply retrieve it from the expanded devices.
+                       parentStoragePool := ""
+                       parentExpandedDevices := args.Instance.ExpandedDevices()
+                       parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices.CloneNative())
+                       if parentLocalRootDiskDeviceKey != "" {
+                               parentStoragePool = parentLocalRootDiskDevice["pool"]
+                       }
+
+                       if parentStoragePool == "" {
+                               return fmt.Errorf("Instance's root device is missing the pool property")
+                       }
+
+                       // A zero length Snapshots slice indicates volume only migration in
+                       // VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
+                       if !args.VolumeOnly {
+                               volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
+                               for _, snap := range args.Snapshots {
+                                       volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
+                                       snapArgs := snapshotProtobufToInstanceArgs(args.Instance.Project(), args.Instance.Name(), snap)
+
+                                       // Ensure that snapshot and parent container have the same
+                                       // storage pool in their local root disk device. If the root
+                                       // disk device for the snapshot comes from a profile on the
+                                       // new instance as well we don't need to do anything.
+                                       if snapArgs.Devices != nil {
+                                               snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapArgs.Devices.CloneNative())
+                                               if snapLocalRootDiskDeviceKey != "" {
+                                                       snapArgs.Devices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
+                                               }
+                                       }
+
+                                       // Try and load the instance.
+                                       _, err := instanceLoadByProjectAndName(args.Instance.DaemonState(), args.Instance.Project(), snapArgs.Name)
+                                       if err != nil {
+                                               // Create the snapshot as it doesn't seem to exist.
+                                               _, err := instanceCreateInternal(state, snapArgs)
+                                               if err != nil {
+                                                       return err
+                                               }
+                                       }
+                               }
+                       }
+
+                       return pool.CreateInstanceFromMigration(args.Instance, &shared.WebsocketIO{Conn: conn}, volTargetArgs, op)
+               }
+       } else if c.src.instance.Type() == instancetype.Container {
                ct := c.src.instance.(*containerLXC)
                myTarget = ct.Storage().MigrationSink
                myType := ct.Storage().MigrationType()

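The err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented
guard above is how this series routes between the new storage layer and the
legacy one: the pool lookup returns a sentinel error when the instance's pool
driver has not been ported yet, and only then does control fall through to
the old per-type sink. A minimal sketch of that sentinel-error dispatch, with
hypothetical stand-ins for the LXD types:

package main

import (
    "errors"
    "fmt"
)

// ErrUnknownDriver mirrors storageDrivers.ErrUnknownDriver: a sentinel
// returned when a pool's driver has no new-style implementation yet.
var ErrUnknownDriver = errors.New("unknown driver")

// getPool stands in for storagePools.GetPoolByInstance (hypothetical).
func getPool(driver string) (string, error) {
    if driver == "zfs" { // pretend only ZFS is ported so far
        return "new-layer pool", nil
    }
    return "", ErrUnknownDriver
}

func migrate(driver string) error {
    pool, err := getPool(driver)
    if err != ErrUnknownDriver {
        // New storage layer handles this driver; any other error is fatal.
        if err != nil {
            return err
        }
        fmt.Println("migrating via", pool)
        return nil
    }

    // Sentinel hit: fall back to the legacy per-type storage code.
    fmt.Println("migrating via legacy storage for", driver)
    return nil
}

func main() {
    migrate("zfs")
    migrate("ceph")
}
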
From fd76f5853a6dc7d3fd33253f45d0e26ab1644055 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 2 Dec 2019 13:48:21 +0000
Subject: [PATCH 4/4] lxd/containers/post: Links createFromMigration to new
 storage pkg

- Only creates instance DB records and doesn't create storage volumes (leaves 
this to the storage layer).

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/containers_post.go | 107 ++++++++++++++++++++++-------------------
 1 file changed, 58 insertions(+), 49 deletions(-)

diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index 43bf9ee95c..071cd057b2 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -312,77 +312,87 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
                }
        }()
 
+       instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly
+
        if !req.Source.Refresh {
-               /* Only create a container from an image if we're going to
-                * rsync over the top of it. In the case of a better file
-                * transfer mechanism, let's just use that.
-                *
-                * TODO: we could invent some negotiation here, where if the
-                * source and sink both have the same image, we can clone from
-                * it, but we have to know before sending the snapshot that
-                * we're sending the whole thing or just a delta from the
-                * image, so one extra negotiation round trip is needed. An
-                * alternative is to move actual container object to a later
-                * point and just negotiate it over the migration control
-                * socket. Anyway, it'll happen later :)
-                */
-               _, _, err = d.cluster.ImageGet(args.Project, req.Source.BaseImage, false, true)
-               if err != nil {
-                       inst, err = instanceCreateAsEmpty(d, args)
+               // Check if we can load new storage layer for pool driver type.
+               _, err := storagePools.GetPoolByName(d.State(), storagePool)
+               if err != storageDrivers.ErrUnknownDriver {
                        if err != nil {
                                return response.InternalError(err)
                        }
-               } else {
-                       // Retrieve the future storage pool.
-                       tmpInst, err := instanceLoad(d.State(), args, nil)
-                       if err != nil {
-                               return response.InternalError(err)
+
+                       // Get preferred migration type from storage backend.
+                       // tomp TODO add optimised migration support from existing image.
+                       /*migrationTypes := pool.MigrationTypes(storagePools.InstanceContentType(tmpInst))
+                       if len(migrationTypes) > 0 {
+                               migrationType = migrationTypes[0].FSType
                        }
+                       */
 
-                       _, rootDiskDevice, err := shared.GetRootDiskDevice(tmpInst.ExpandedDevices().CloneNative())
+                       // Create the instance record.
+                       inst, err = instanceCreateInternal(d.State(), args)
                        if err != nil {
                                return response.InternalError(err)
                        }
-
-                       if rootDiskDevice["pool"] == "" {
-                               return response.BadRequest(fmt.Errorf("The container's root device is missing the pool property"))
-                       }
-
-                       storagePool = rootDiskDevice["pool"]
-
-                       var migrationType migration.MigrationFSType
-
-                       // Check if we can load new storage layer for pool driver type.
-                       pool, err := storagePools.GetPoolByName(d.State(), storagePool)
-                       if err != storageDrivers.ErrUnknownDriver {
+               } else {
+                       /* Only create a container from an image if we're going to
+                        * rsync over the top of it. In the case of a better file
+                        * transfer mechanism, let's just use that.
+                        *
+                        * TODO: we could invent some negotiation here, where if the
+                        * source and sink both have the same image, we can clone from
+                        * it, but we have to know before sending the snapshot that
+                        * we're sending the whole thing or just a delta from the
+                        * image, so one extra negotiation round trip is needed. An
+                        * alternative is to move actual container object to a later
+                        * point and just negotiate it over the migration control
+                        * socket. Anyway, it'll happen later :)
+                        */
+                       _, _, err = d.cluster.ImageGet(args.Project, req.Source.BaseImage, false, true)
+                       if err != nil {
+                               inst, err = instanceCreateAsEmpty(d, args)
                                if err != nil {
                                        return response.InternalError(err)
                                }
-
-                               // Get preferred migration type from storage backend.
-                               migrationTypes := pool.MigrationTypes(storagePools.InstanceContentType(tmpInst))
-                               if len(migrationTypes) > 0 {
-                                       migrationType = migrationTypes[0].FSType
-                               }
                        } else {
-                               ps, err := storagePoolInit(d.State(), storagePool)
+                               // Retrieve the future storage pool.
+                               tmpInst, err := instanceLoad(d.State(), args, nil)
                                if err != nil {
                                        return response.InternalError(err)
                                }
 
-                               migrationType = ps.MigrationType()
-                       }
-
-                       if migrationType == migration.MigrationFSType_RSYNC {
-                               inst, err = instanceCreateFromImage(d, args, req.Source.BaseImage, nil)
+                               _, rootDiskDevice, err := shared.GetRootDiskDevice(tmpInst.ExpandedDevices().CloneNative())
                                if err != nil {
                                        return response.InternalError(err)
                                }
-                       } else {
-                               inst, err = instanceCreateAsEmpty(d, args)
+
+                               if rootDiskDevice["pool"] == "" {
+                                       return response.BadRequest(fmt.Errorf("The container's root device is missing the pool property"))
+                               }
+
+                               storagePool = rootDiskDevice["pool"]
+
+                               var migrationType migration.MigrationFSType
+
+                               ps, err := storagePoolInit(d.State(), storagePool)
                                if err != nil {
                                        return response.InternalError(err)
                                }
+
+                               migrationType = ps.MigrationType()
+
+                               if migrationType == migration.MigrationFSType_RSYNC {
+                                       inst, err = instanceCreateFromImage(d, args, req.Source.BaseImage, nil)
+                                       if err != nil {
+                                               return response.InternalError(err)
+                                       }
+                               } else {
+                                       inst, err = instanceCreateAsEmpty(d, args)
+                                       if err != nil {
+                                               return response.InternalError(err)
+                                       }
+                               }
                        }
                }
        }
@@ -410,7 +420,6 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
                push = true
        }
 
-       instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly
        migrationArgs := MigrationSinkArgs{
                Url: req.Source.Operation,
                Dialer: websocket.Dialer{
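
To summarize the control flow this last patch leaves behind in
createFromMigration: on the new storage layer only the instance DB record is
created up front (instanceCreateInternal), and the volume itself is built by
the storage layer while receiving the migration, whereas the legacy path
still pre-creates storage so rsync can run over the top of it. A condensed,
standalone sketch of that decision tree (the booleans and signature are
illustrative, not the real ones):

package main

import "fmt"

// decide condenses the reordered flow in createFromMigration after this
// patch. The helper names are the ones from the diff; the signature and
// boolean inputs here are illustrative only.
func decide(refresh, newStorageLayer, rsyncWithBaseImage bool) string {
    if refresh {
        return "refresh: reuse the existing instance"
    }
    if newStorageLayer {
        // New layer: create only the instance DB record; the storage
        // layer builds the volume while receiving the migration.
        return "instanceCreateInternal (DB record only)"
    }
    // Legacy layer: pre-create the volume so rsync can run over it.
    if rsyncWithBaseImage {
        return "instanceCreateFromImage"
    }
    return "instanceCreateAsEmpty"
}

func main() {
    fmt.Println(decide(false, true, false))
    fmt.Println(decide(false, false, true))
    fmt.Println(decide(false, false, false))
}
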
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel
