The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/7080

This e-mail was sent by the LXC bot, direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Adds several changes that were required for VM backup support and that can equally be applied to non-VM backups (a usage sketch of the new helpers follows the list):

- Adds a `shared.CompressedTarReader` function that avoids repeating the setup of a compressed tar reader stream.
- Switches the BTRFS optimized backup restore to use the tar reader (avoiding the need to unpack into a temporary directory).
- Adds an `instancewriter.WriteFileFromReader` function that allows streaming a file into a tarball (avoiding temporary file creation).
- Switches the index.yaml file write to use `instancewriter.WriteFileFromReader`.
- Renames the `db.ContainerID` function to `InstanceID`.
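
A rough usage sketch of the new tar reader helper follows, mirroring how the BTRFS restore path in PATCH 01 consumes it. The extractBackupFile wrapper, the file names and the main driver are illustrative only; only the shared.* helpers come from these patches.

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/lxc/lxd/shared"
)

// extractBackupFile copies one member of a (possibly compressed) backup
// tarball to dst, streaming it straight out of the archive.
func extractBackupFile(srcData io.ReadSeeker, name string, dst io.Writer) error {
	// Detect the compression used so CompressedTarReader knows how to unpack.
	srcData.Seek(0, 0)
	_, _, unpacker, err := shared.DetectCompressionFile(srcData)
	if err != nil {
		return err
	}

	// CompressedTarReader rewinds srcData and returns a tar.Reader over the
	// decompressed stream plus a cancel function that cleans up the unpacker.
	tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), srcData, unpacker)
	if err != nil {
		return err
	}
	defer cancelFunc()

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // End of archive.
		}
		if err != nil {
			return err
		}

		if hdr.Name == name {
			// Stream the member directly to dst, no temporary file needed.
			_, err = io.Copy(dst, tr)
			return err
		}
	}

	return fmt.Errorf("Could not find %q", name)
}

func main() {
	f, err := os.Open("backup.tar.xz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	err = extractBackupFile(f, "backup/index.yaml", os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
}
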
From b9a3fedcb9ea692d129ca935be2a974d9114cd65 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Wed, 25 Mar 2020 11:53:28 +0000
Subject: [PATCH 01/16] lxd/storage/drivers/driver/btrfs/volumes:
 CreateVolumeFromBackup to use tar reader for optimized volume restore

Avoids the need to unpack the tarball to a temporary location.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/drivers/driver_btrfs_volumes.go | 82 ++++++++++-----------
 1 file changed, 38 insertions(+), 44 deletions(-)

diff --git a/lxd/storage/drivers/driver_btrfs_volumes.go b/lxd/storage/drivers/driver_btrfs_volumes.go
index 97d6773305..e1db35fe2d 100644
--- a/lxd/storage/drivers/driver_btrfs_volumes.go
+++ b/lxd/storage/drivers/driver_btrfs_volumes.go
@@ -1,6 +1,7 @@
 package drivers
 
 import (
+       "context"
        "fmt"
        "io"
        "io/ioutil"
@@ -119,39 +120,45 @@ func (d *btrfs) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData i
                // And lastly the main volume.
                d.DeleteVolume(vol, op)
        }
-
        // Only execute the revert function if we have had an error internally.
        revert.Add(revertHook)
 
-       // Create a temporary directory to unpack the backup into.
-       unpackDir, err := ioutil.TempDir(GetVolumeMountPath(d.name, vol.volType, ""), "backup.")
-       if err != nil {
-               return nil, nil, errors.Wrapf(err, "Failed to create temporary directory under '%s'", GetVolumeMountPath(d.name, vol.volType, ""))
-       }
-       defer os.RemoveAll(unpackDir)
+       // Define function to unpack a volume from a backup tarball file.
+       unpackVolume := func(r io.ReadSeeker, unpacker []string, srcFile string, mountPath string) error {
+               d.Logger().Debug("Unpacking optimized volume", log.Ctx{"source": srcFile, "target": mountPath})
+               tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
+               if err != nil {
+                       return err
+               }
+               defer cancelFunc()
 
-       err = os.Chmod(unpackDir, 0100)
-       if err != nil {
-               return nil, nil, errors.Wrapf(err, "Failed to chmod '%s'", unpackDir)
-       }
+               for {
+                       hdr, err := tr.Next()
+                       if err == io.EOF {
+                               break // End of archive
+                       }
+                       if err != nil {
+                               return err
+                       }
 
-       // Find the compression algorithm used for backup source data.
-       srcData.Seek(0, 0)
-       tarArgs, _, _, err := shared.DetectCompressionFile(srcData)
-       if err != nil {
-               return nil, nil, err
-       }
+                       if hdr.Name == srcFile {
+                               // Extract the backup.
+                               err = shared.RunCommandWithFds(tr, nil, "btrfs", "receive", "-e", mountPath)
+                               if err != nil {
+                                       return err
+                               }
+
+                               cancelFunc()
+                               return nil
+                       }
+               }
 
-       // Prepare tar arguments.
-       args := append(tarArgs, []string{
-               "-",
-               "--strip-components=1",
-               "-C", unpackDir, "backup",
-       }...)
+               return fmt.Errorf("Could not find %q", srcFile)
+       }
 
-       // Unpack the backup.
+       // Find the compression algorithm used for backup source data.
        srcData.Seek(0, 0)
-       err = shared.RunCommandWithFds(srcData, nil, "tar", args...)
+       _, _, unpacker, err := shared.DetectCompressionFile(srcData)
        if err != nil {
                return nil, nil, err
        }
@@ -167,39 +174,26 @@ func (d *btrfs) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData i
        // Restore backups from oldest to newest.
        snapshotsDir := GetVolumeSnapshotDir(d.name, vol.volType, vol.name)
        for _, snapName := range snapshots {
-               // Open the backup.
-               feeder, err := os.Open(filepath.Join(unpackDir, "snapshots", fmt.Sprintf("%s.bin", snapName)))
-               if err != nil {
-                       return nil, nil, errors.Wrapf(err, "Failed to open '%s'", filepath.Join(unpackDir, "snapshots", fmt.Sprintf("%s.bin", snapName)))
-               }
-               defer feeder.Close()
-
-               // Extract the backup.
-               err = shared.RunCommandWithFds(feeder, nil, "btrfs", "receive", "-e", snapshotsDir)
+               err = unpackVolume(srcData, unpacker, fmt.Sprintf("backup/snapshots/%s.bin", snapName), snapshotsDir)
                if err != nil {
                        return nil, nil, err
                }
        }
 
-       // Open the backup.
-       feeder, err := os.Open(filepath.Join(unpackDir, "container.bin"))
+       // Create a temporary directory to unpack the backup into.
+       unpackDir, err := ioutil.TempDir(GetVolumeMountPath(d.name, vol.volType, ""), "backup.")
        if err != nil {
-               return nil, nil, errors.Wrapf(err, "Failed to open '%s'", filepath.Join(unpackDir, "container.bin"))
+               return nil, nil, errors.Wrapf(err, "Failed to create temporary directory under '%s'", GetVolumeMountPath(d.name, vol.volType, ""))
        }
-       defer feeder.Close()
+       defer os.RemoveAll(unpackDir)
 
-       // Extrack the backup.
-       err = shared.RunCommandWithFds(feeder, nil, "btrfs", "receive", "-e", unpackDir)
+       err = unpackVolume(srcData, unpacker, fmt.Sprintf("backup/container.bin"), unpackDir)
        if err != nil {
                return nil, nil, err
        }
-       defer d.deleteSubvolume(filepath.Join(unpackDir, ".backup"), true)
 
        // Re-create the writable subvolume.
        err = d.snapshotSubvolume(filepath.Join(unpackDir, ".backup"), vol.MountPath(), false, false)
-       if err != nil {
-               return nil, nil, err
-       }
 
        revert.Success()
        return nil, revertHook, nil

From 6fa271e577b9148d0d4e53d17f3e766dc0636b6d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 14:18:43 +0000
Subject: [PATCH 02/16] shared/archive: Adds CompressedTarReader function

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 shared/archive.go | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/shared/archive.go b/shared/archive.go
index 2746f60d4c..875874ffdb 100644
--- a/shared/archive.go
+++ b/shared/archive.go
@@ -1,12 +1,16 @@
 package shared
 
 import (
+       "archive/tar"
        "bytes"
+       "context"
        "fmt"
        "io"
        "os"
+       "os/exec"
 )
 
+// DetectCompression detects compression from a file name.
 func DetectCompression(fname string) ([]string, string, []string, error) {
        f, err := os.Open(fname)
        if err != nil {
@@ -54,3 +58,43 @@ func DetectCompressionFile(f io.Reader) ([]string, string, []string, error) {
                return nil, "", nil, fmt.Errorf("Unsupported compression")
        }
 }
+
+// CompressedTarReader returns a tar reader from the supplied (optionally compressed) tarball stream.
+// The unpacker arguments are those returned by DetectCompressionFile().
+// The returned cancelFunc should be called when finished with reader to clean up any resources used. This can be
+// done before reading to the end of the tarball if desired.
+func CompressedTarReader(ctx context.Context, r io.ReadSeeker, unpacker []string) (*tar.Reader, context.CancelFunc, error) {
+       ctx, cancelFunc := context.WithCancel(ctx)
+
+       r.Seek(0, 0)
+       var tr *tar.Reader
+
+       if len(unpacker) > 0 {
+               cmd := exec.CommandContext(ctx, unpacker[0], unpacker[1:]...)
+               cmd.Stdin = r
+               stdout, err := cmd.StdoutPipe()
+               if err != nil {
+                       return nil, cancelFunc, err
+               }
+
+               err = cmd.Start()
+               if err != nil {
+                       stdout.Close()
+                       return nil, cancelFunc, err
+               }
+
+               // Wait for context and command to finish in go routine so reader can return.
+               go func() {
+                       select {
+                       case <-ctx.Done():
+                               cmd.Wait()
+                       }
+               }()
+
+               tr = tar.NewReader(stdout)
+       } else {
+               tr = tar.NewReader(r)
+       }
+
+       return tr, cancelFunc, nil
+}

From b989bf3be50049972d06b06785c83d71e04cb8f9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 14:19:09 +0000
Subject: [PATCH 03/16] lxd/backup/backup: shared.CompressedTarReader usage

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/backup/backup.go | 33 ++++++---------------------------
 1 file changed, 6 insertions(+), 27 deletions(-)

diff --git a/lxd/backup/backup.go b/lxd/backup/backup.go
index 813ab82157..47be52f619 100644
--- a/lxd/backup/backup.go
+++ b/lxd/backup/backup.go
@@ -1,15 +1,14 @@
 package backup
 
 import (
-       "archive/tar"
        "context"
        "fmt"
        "io"
        "os"
-       "os/exec"
        "strings"
        "time"
 
+       "github.com/pkg/errors"
        "gopkg.in/yaml.v2"
 
        "github.com/lxc/lxd/lxd/project"
@@ -38,7 +37,6 @@ type Info struct {
 
 // GetInfo extracts backup information from a given ReadSeeker.
 func GetInfo(r io.ReadSeeker) (*Info, error) {
-       var tr *tar.Reader
        result := Info{}
        hasIndexFile := false
 
@@ -52,35 +50,16 @@ func GetInfo(r io.ReadSeeker) (*Info, error) {
        if err != nil {
                return nil, err
        }
-       r.Seek(0, 0)
 
        if unpacker == nil {
                return nil, fmt.Errorf("Unsupported backup compression")
        }
 
-       ctx, cancelFunc := context.WithCancel(context.Background())
-       defer cancelFunc()
-
-       if len(unpacker) > 0 {
-               cmd := exec.CommandContext(ctx, unpacker[0], unpacker[1:]...)
-               cmd.Stdin = r
-
-               stdout, err := cmd.StdoutPipe()
-               if err != nil {
-                       return nil, err
-               }
-               defer stdout.Close()
-
-               err = cmd.Start()
-               if err != nil {
-                       return nil, err
-               }
-               defer cmd.Wait()
-
-               tr = tar.NewReader(stdout)
-       } else {
-               tr = tar.NewReader(r)
+       tr, cancelFunc, err := shared.CompressedTarReader(context.Background(), r, unpacker)
+       if err != nil {
+               return nil, err
        }
+       defer cancelFunc()
 
        for {
                hdr, err := tr.Next()
@@ -88,7 +67,7 @@ func GetInfo(r io.ReadSeeker) (*Info, error) {
                        break // End of archive
                }
                if err != nil {
-                       return nil, err
+                       return nil, errors.Wrapf(err, "Error reading backup 
file info")
                }
 
                if hdr.Name == "backup/index.yaml" {

From 8bded34d8488c6d2a81b8a3c547e94bd64bd9952 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 17:53:02 +0000
Subject: [PATCH 04/16] test/suites/static/analysis: Reinstates checks for
 shared/instancewriter

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 test/suites/static_analysis.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index ff95fb4057..8d5814d9f2 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -109,7 +109,7 @@ test_static_analysis() {
       golint -set_exit_status shared/api/...
       golint -set_exit_status shared/cancel/...
       golint -set_exit_status shared/cmd/...
-      golint -set_exit_status shared/containerwriter/...
+      golint -set_exit_status shared/instancewriter/...
       golint -set_exit_status shared/dnsutil/...
       golint -set_exit_status shared/eagain/...
       golint -set_exit_status shared/generate/...

From 1826b5ff51ea133e7ec3f9c907b27e7a13b1b220 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 11:43:50 +0000
Subject: [PATCH 05/16] lxd/instance/post: InstanceID usage

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/instance_post.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/instance_post.go b/lxd/instance_post.go
index b48974c624..791d43d8cd 100644
--- a/lxd/instance_post.go
+++ b/lxd/instance_post.go
@@ -244,7 +244,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
        }
 
        // Check that the name isn't already in use.
-       id, _ := d.cluster.ContainerID(project, req.Name)
+       id, _ := d.cluster.InstanceID(project, req.Name)
        if id > 0 {
                return response.Conflict(fmt.Errorf("Name '%s' already in use", req.Name))
        }
@@ -373,7 +373,7 @@ func containerPostClusteringMigrate(d *Daemon, c instance.Instance, oldName, new
                }
 
                // Restore the original value of "volatile.apply_template"
-               id, err := d.cluster.ContainerID(c.Project(), destName)
+               id, err := d.cluster.InstanceID(c.Project(), destName)
                if err != nil {
                        return errors.Wrap(err, "Failed to get ID of moved 
instance")
                }

From 9cd93a2e1851caa19466aeb2ad10064231d6a069 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 11:43:28 +0000
Subject: [PATCH 06/16] lxd/db/containers: Renames ContainerID to InstanceID

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/db/containers.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 47476e0046..dd4fed1e15 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -632,8 +632,8 @@ WHERE instances.id=?
        return project, name, err
 }
 
-// ContainerID returns the ID of the container with the given name.
-func (c *Cluster) ContainerID(project, name string) (int, error) {
+// InstanceID returns the ID of the instance with the given name.
+func (c *Cluster) InstanceID(project, name string) (int, error) {
        var id int64
        err := c.Transaction(func(tx *ClusterTx) error {
                var err error

From 5090fa9666165b80a053f1e5e923041363754258 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 24 Mar 2020 16:27:01 +0000
Subject: [PATCH 07/16] lxd/instances/post: Logging in createFromBackup

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/instances_post.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index 87e6be7235..dc79878a26 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -586,6 +586,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
 
        // Parse the backup information.
        backupFile.Seek(0, 0)
+       logger.Debug("Reading backup file info")
        bInfo, err := backup.GetInfo(backupFile)
        if err != nil {
                backupFile.Close()
@@ -598,6 +599,16 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                bInfo.Pool = pool
        }
 
+       logger.Debug("Backup file info loaded", log.Ctx{
+               "type":      bInfo.Type,
+               "name":      bInfo.Name,
+               "project":   bInfo.Project,
+               "backend":   bInfo.Backend,
+               "pool":      bInfo.Pool,
+               "optimized": *bInfo.OptimizedStorage,
+               "snapshots": bInfo.Snapshots,
+       })
+
        // Check storage pool exists.
        _, _, err = d.State().Cluster.StoragePoolGet(bInfo.Pool)
        if errors.Cause(err) == db.ErrNoSuchObject {

From a5188f301507e7091d14b07e499177a520b91f44 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 23 Mar 2020 17:48:37 +0000
Subject: [PATCH 08/16] lxd/instances/post: Logging message change from
 container to instance

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/instances_post.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index dc79878a26..91302d98e3 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -710,7 +710,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
 
 func containersPost(d *Daemon, r *http.Request) response.Response {
        project := projectParam(r)
-       logger.Debugf("Responding to container create")
+       logger.Debugf("Responding to instance create")
 
        // If we're getting binary content, process separately
        if r.Header.Get("Content-Type") == "application/octet-stream" {

From efb0d45e894e91c6224fe6986fd8f95f26dd8439 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 15:30:33 +0000
Subject: [PATCH 09/16] lxd/instances/post: Switches to revert package in
 createFromBackup

Other minor formatting changes.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/instances_post.go | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index 91302d98e3..fb832fcdf1 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -26,6 +26,7 @@ import (
        "github.com/lxc/lxd/lxd/operations"
        projecthelpers "github.com/lxc/lxd/lxd/project"
        "github.com/lxc/lxd/lxd/response"
+       "github.com/lxc/lxd/lxd/revert"
        storagePools "github.com/lxc/lxd/lxd/storage"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/api"
@@ -536,17 +537,20 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
 }
 
 func createFromBackup(d *Daemon, project string, data io.Reader, pool string) response.Response {
+       revert := revert.New()
+       defer revert.Fail()
+
        // Create temporary file to store uploaded backup data.
        backupFile, err := ioutil.TempFile("", "lxd_backup_")
        if err != nil {
                return response.InternalError(err)
        }
        defer os.Remove(backupFile.Name())
+       revert.Add(func() { backupFile.Close() })
 
        // Stream uploaded backup data into temporary file.
        _, err = io.Copy(backupFile, data)
        if err != nil {
-               backupFile.Close()
                return response.InternalError(err)
        }
 
@@ -554,7 +558,6 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
        backupFile.Seek(0, 0)
        _, algo, decomArgs, err := shared.DetectCompressionFile(backupFile)
        if err != nil {
-               backupFile.Close()
                return response.InternalError(err)
        }
 
@@ -565,7 +568,6 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                // Create temporary file to store the decompressed tarball in.
                tarFile, err := ioutil.TempFile("", "lxd_decompress_")
                if err != nil {
-                       backupFile.Close()
                        return response.InternalError(err)
                }
                defer os.Remove(tarFile.Name())
@@ -589,7 +591,6 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
        logger.Debug("Reading backup file info")
        bInfo, err := backup.GetInfo(backupFile)
        if err != nil {
-               backupFile.Close()
                return response.BadRequest(err)
        }
        bInfo.Project = project
@@ -636,25 +637,19 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                return response.InternalError(err)
        }
 
+       // Copy reverter so far so we can use it inside run after this function has finished.
+       runRevert := revert.Clone()
+
        run := func(op *operations.Operation) error {
                defer backupFile.Close()
+               defer runRevert.Fail()
 
                // Dump tarball to storage.
                postHook, revertHook, err := instanceCreateFromBackup(d.State(), *bInfo, backupFile)
                if err != nil {
                        return errors.Wrap(err, "Create instance from backup")
                }
-
-               revert := true
-               defer func() {
-                       if !revert {
-                               return
-                       }
-
-                       if revertHook != nil {
-                               revertHook()
-                       }
-               }()
+               revert.Add(revertHook)
 
                body, err := json.Marshal(&internalImportPost{
                        Name:  bInfo.Name,
@@ -664,14 +659,16 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                        return errors.Wrap(err, "Marshal internal import request")
                }
 
+               // Generate internal request to import instance from storage.
                req := &http.Request{
                        Body: ioutil.NopCloser(bytes.NewReader(body)),
                }
+
                req.URL = &url.URL{
                        RawQuery: fmt.Sprintf("project=%s", project),
                }
-               resp := internalImport(d, req)
 
+               resp := internalImport(d, req)
                if resp.String() != "success" {
                        return fmt.Errorf("Internal import request: %v", 
resp.String())
                }
@@ -690,7 +687,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                        }
                }
 
-               revert = false
+               runRevert.Success()
                return nil
        }
 
@@ -698,13 +695,12 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
        resources["instances"] = []string{bInfo.Name}
        resources["containers"] = resources["instances"]
 
-       op, err := operations.OperationCreate(d.State(), project, operations.OperationClassTask, db.OperationBackupRestore,
-               resources, nil, run, nil, nil)
+       op, err := operations.OperationCreate(d.State(), project, operations.OperationClassTask, db.OperationBackupRestore, resources, nil, run, nil, nil)
        if err != nil {
-               backupFile.Close()
                return response.InternalError(err)
        }
 
+       revert.Success()
        return operations.OperationResponse(op)
 }
 

From 824186f4b5f2a393329037bc5fb3b33907cd11a5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 23 Mar 2020 15:24:05 +0000
Subject: [PATCH 10/16] lxd: Merges instanceCreateFromBackup into
 createFromBackup

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/instance.go       | 21 ---------------------
 lxd/instances_post.go | 13 +++++++++++--
 2 files changed, 11 insertions(+), 23 deletions(-)

diff --git a/lxd/instance.go b/lxd/instance.go
index eb306f4d62..e66561789c 100644
--- a/lxd/instance.go
+++ b/lxd/instance.go
@@ -3,7 +3,6 @@ package main
 import (
        "context"
        "fmt"
-       "io"
        "os"
        "os/exec"
        "path/filepath"
@@ -16,7 +15,6 @@ import (
        cron "gopkg.in/robfig/cron.v2"
 
        "github.com/flosch/pongo2"
-       "github.com/lxc/lxd/lxd/backup"
        "github.com/lxc/lxd/lxd/cluster"
        "github.com/lxc/lxd/lxd/db"
        deviceConfig "github.com/lxc/lxd/lxd/device/config"
@@ -72,25 +70,6 @@ func instanceCreateAsEmpty(d *Daemon, args db.InstanceArgs) (instance.Instance,
        return inst, nil
 }
 
-// instanceCreateFromBackup imports a backup file to restore an instance. Because the backup file
-// is unpacked and restored onto the storage device before the instance is created in the database
-// it is necessary to return two functions; a post hook that can be run once the instance has been
-// created in the database to run any storage layer finalisations, and a revert hook that can be
-// run if the instance database load process fails that will remove anything created thus far.
-func instanceCreateFromBackup(s *state.State, info backup.Info, srcData io.ReadSeeker) (func(instance.Instance) error, func(), error) {
-       pool, err := storagePools.GetPoolByName(s, info.Pool)
-       if err != nil {
-               return nil, nil, err
-       }
-
-       postHook, revertHook, err := pool.CreateInstanceFromBackup(info, srcData, nil)
-       if err != nil {
-               return nil, nil, err
-       }
-
-       return postHook, revertHook, nil
-}
-
 // instanceCreateFromImage creates an instance from a rootfs image.
 func instanceCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *operations.Operation) (instance.Instance, error) {
        s := d.State()
diff --git a/lxd/instances_post.go b/lxd/instances_post.go
index fb832fcdf1..768a82d4a0 100644
--- a/lxd/instances_post.go
+++ b/lxd/instances_post.go
@@ -644,8 +644,17 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                defer backupFile.Close()
                defer runRevert.Fail()
 
-               // Dump tarball to storage.
-               postHook, revertHook, err := instanceCreateFromBackup(d.State(), *bInfo, backupFile)
+               pool, err := storagePools.GetPoolByName(d.State(), bInfo.Pool)
+               if err != nil {
+                       return err
+               }
+
+               // Dump tarball to storage. Because the backup file is unpacked and restored onto the storage
+               // device before the instance is created in the database it is necessary to return two functions;
+               // a post hook that can be run once the instance has been created in the database to run any
+               // storage layer finalisations, and a revert hook that can be run if the instance database load
+               // process fails that will remove anything created thus far.
+               postHook, revertHook, err := pool.CreateInstanceFromBackup(*bInfo, backupFile, nil)
                if err != nil {
                        return errors.Wrap(err, "Create instance from backup")
                }

From 5682095f3ccf80097a08edde7440382f2f3bf4d9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 12:10:10 +0000
Subject: [PATCH 11/16] lxd/storage/drivers/utils: Adds blockDevSizeBytes
 function

For retrieving size of block devices in bytes.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/drivers/utils.go | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index fdd1bfb29b..b68e2af454 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -7,6 +7,7 @@ import (
        "os"
        "path/filepath"
        "sort"
+       "strconv"
        "strings"
        "time"
 
@@ -732,3 +733,18 @@ func ShiftZFSSkipper(dir string, absPath string, fi os.FileInfo) bool {
 
        return false
 }
+
+// blockDevSizeBytes returns the size of a block device.
+func blockDevSizeBytes(blockDevPath string) (int64, error) {
+       output, err := shared.RunCommand("blockdev", "--getsize64", blockDevPath)
+       if err != nil {
+               return -1, err
+       }
+
+       sizeBytes, err := strconv.ParseInt(strings.TrimSpace(output), 10, 64)
+       if err != nil {
+               return -1, err
+       }
+
+       return sizeBytes, nil
+}

From 4a604e61a2b6fcef4f20eaa6c636dfebd4cdce3e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 12:10:42 +0000
Subject: [PATCH 12/16] lxd/storage/drivers/driver/ceph/volumes: Updates
 SetVolumeQuota to use blockDevSizeBytes

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/storage/drivers/driver_ceph_volumes.go | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/lxd/storage/drivers/driver_ceph_volumes.go b/lxd/storage/drivers/driver_ceph_volumes.go
index 9cb8cd49a8..fa967dbcdb 100644
--- a/lxd/storage/drivers/driver_ceph_volumes.go
+++ b/lxd/storage/drivers/driver_ceph_volumes.go
@@ -4,10 +4,7 @@ import (
        "encoding/json"
        "fmt"
        "io"
-       "io/ioutil"
        "os"
-       "path/filepath"
-       "strconv"
        "strings"
 
        "github.com/pborman/uuid"
@@ -676,26 +673,18 @@ func (d *ceph) SetVolumeQuota(vol Volume, size string, op *operations.Operation)
                return err
        }
 
-       // The grow/shrink functions use Mount/Unmount which may cause an
-       // unmap, so make sure to keep a reference.
+       // The grow/shrink functions use Mount/Unmount which may cause an unmap, so make sure to keep a reference.
        oldKeepDevice := vol.keepDevice
        vol.keepDevice = true
        defer func() {
                vol.keepDevice = oldKeepDevice
        }()
 
-       RBDSize, err := ioutil.ReadFile(fmt.Sprintf("/sys/class/block/%s/size", filepath.Base(RBDDevPath)))
+       oldSizeBytes, err := blockDevSizeBytes(RBDDevPath)
        if err != nil {
                return errors.Wrapf(err, "Error getting current size")
        }
 
-       RBDSizeBlocks, err := strconv.Atoi(strings.TrimSpace(string(RBDSize)))
-       if err != nil {
-               return errors.Wrapf(err, "Error getting converting current size to integer")
-       }
-
-       oldSizeBytes := int64(RBDSizeBlocks * 512)
-
        newSizeBytes, err := units.ParseByteSizeString(size)
        if err != nil {
                return err

From a232f3d2caf3f2a32dd76d8d98f4540901e04054 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 12:09:04 +0000
Subject: [PATCH 13/16] shared/instancewriter/instance/file/info: Adds FileInfo
 for os.FileInfo implementation

Used to generate 'fake' file info when streaming block devices into a tarball.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 shared/instancewriter/instance_file_info.go | 52 +++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 shared/instancewriter/instance_file_info.go

diff --git a/shared/instancewriter/instance_file_info.go b/shared/instancewriter/instance_file_info.go
new file mode 100644
index 0000000000..0d0b288a4a
--- /dev/null
+++ b/shared/instancewriter/instance_file_info.go
@@ -0,0 +1,52 @@
+package instancewriter
+
+import (
+       "archive/tar"
+       "os"
+       "time"
+)
+
+// FileInfo static file implementation of os.FileInfo.
+type FileInfo struct {
+       FileName    string
+       FileSize    int64
+       FileMode    os.FileMode
+       FileModTime time.Time
+}
+
+// Name of file.
+func (f *FileInfo) Name() string {
+       return f.FileName
+}
+
+// Size of file.
+func (f *FileInfo) Size() int64 {
+       return f.FileSize
+}
+
+// Mode of file.
+func (f *FileInfo) Mode() os.FileMode {
+       return f.FileMode
+}
+
+// ModTime of file.
+func (f *FileInfo) ModTime() time.Time {
+       return f.FileModTime
+}
+
+// IsDir is file a directory.
+func (f *FileInfo) IsDir() bool {
+       return false
+}
+
+// Sys returns further unix attributes for a file owned by root.
+func (f *FileInfo) Sys() interface{} {
+       return &tar.Header{
+               Uid:        0,
+               Gid:        0,
+               Uname:      "root",
+               Gname:      "root",
+               AccessTime: time.Now(),
+               ChangeTime: time.Now(),
+       }
+}

From 8d12f9e5fb73c33d05c77fae7ea5bee0f0f8200e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 12:08:32 +0000
Subject: [PATCH 14/16] shared/instancewriter/instance/tar/writer: Adds
 WriteFileFromReader function

For streaming files into a tarball from an io.Reader.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 shared/instancewriter/instance_tar_writer.go | 23 ++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/shared/instancewriter/instance_tar_writer.go b/shared/instancewriter/instance_tar_writer.go
index 52a422f9ab..021858fa64 100644
--- a/shared/instancewriter/instance_tar_writer.go
+++ b/shared/instancewriter/instance_tar_writer.go
@@ -103,7 +103,8 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI
                }
        }
 
-       if err := ctw.tarWriter.WriteHeader(hdr); err != nil {
+       err = ctw.tarWriter.WriteHeader(hdr)
+       if err != nil {
                return errors.Wrap(err, "Failed to write tar header")
        }
 
@@ -114,7 +115,8 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI
                }
                defer f.Close()
 
-               if _, err := io.Copy(ctw.tarWriter, f); err != nil {
+               _, err = io.Copy(ctw.tarWriter, f)
+               if err != nil {
                        return errors.Wrapf(err, "Failed to copy file content 
%q", srcPath)
                }
        }
@@ -122,6 +124,23 @@ func (ctw *InstanceTarWriter) WriteFile(name string, srcPath string, fi os.FileI
        return nil
 }
 
+// WriteFileFromReader streams a file into the tarball using the src reader.
+// A manually generated os.FileInfo should be supplied so that the tar header can be added before streaming starts.
+func (ctw *InstanceTarWriter) WriteFileFromReader(src io.Reader, fi os.FileInfo) error {
+       hdr, err := tar.FileInfoHeader(fi, "")
+       if err != nil {
+               return errors.Wrap(err, "Failed to create tar info header")
+       }
+
+       err = ctw.tarWriter.WriteHeader(hdr)
+       if err != nil {
+               return errors.Wrap(err, "Failed to write tar header")
+       }
+
+       _, err = io.Copy(ctw.tarWriter, src)
+       return err
+}
+
 // Close finishes writing the tarball.
 func (ctw *InstanceTarWriter) Close() error {
        err := ctw.tarWriter.Close()

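PATCH 13 and PATCH 14 together let callers stream data into a backup tarball without staging it in a temporary file, which is how block devices are intended to be handled for VM backups. A rough sketch of the pattern; the writeBlockVolume helper, package name and parameters are illustrative, only instancewriter.FileInfo and WriteFileFromReader come from the patches.

package backuputil

import (
	"os"
	"time"

	"github.com/lxc/lxd/shared/instancewriter"
)

// writeBlockVolume streams a block device into an already-open backup tarball.
// sizeBytes must be known up front (e.g. via "blockdev --getsize64", as in the
// blockDevSizeBytes helper from PATCH 11) because the tar header is written
// before the data is copied.
func writeBlockVolume(tarWriter *instancewriter.InstanceTarWriter, devPath string, name string, sizeBytes int64) error {
	f, err := os.Open(devPath)
	if err != nil {
		return err
	}
	defer f.Close()

	// Fake file info for the tar header; there is no real file behind the stream.
	fi := instancewriter.FileInfo{
		FileName:    name,
		FileSize:    sizeBytes,
		FileMode:    0600,
		FileModTime: time.Now(),
	}

	return tarWriter.WriteFileFromReader(f, &fi)
}
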
From ab2201a7c0c0f977cc50b8b0e68c59169c55d690 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 20 Mar 2020 12:12:28 +0000
Subject: [PATCH 15/16] lxd/backup: Switches index.yaml file generation to use
 WriteFileFromReader in backupCreate

This is to avoid the need for a temporary dir and file.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/backup.go | 37 +++++++++++--------------------------
 1 file changed, 11 insertions(+), 26 deletions(-)

diff --git a/lxd/backup.go b/lxd/backup.go
index b96fb44060..8385bb75d0 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -1,11 +1,10 @@
 package main
 
 import (
+       "bytes"
        "fmt"
        "io"
-       "io/ioutil"
        "os"
-       "path/filepath"
        "time"
 
        "context"
@@ -95,15 +94,6 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
 
        target := shared.VarPath("backups", 
project.Instance(sourceInst.Project(), b.Name()))
 
-       // Create temp dir for storing transient files that will be removed at end.
-       tmpDirPath := fmt.Sprintf("%s_tmp", target)
-       logger.Debug("Creating temporary backup directory", log.Ctx{"path": tmpDirPath})
-       err = os.Mkdir(tmpDirPath, 0700)
-       if err != nil {
-               return err
-       }
-       defer os.RemoveAll(tmpDirPath)
-
        // Setup the tarball writer.
        logger.Debug("Opening backup tarball for writing", log.Ctx{"path": 
target})
        tarFileWriter, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY, 0600)
@@ -142,9 +132,8 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
        }(tarWriterRes)
 
        // Write index file.
-       indexFile := filepath.Join(tmpDirPath, "index.yaml")
-       logger.Debug("Adding backup index file", log.Ctx{"path": indexFile})
-       err = backupWriteIndex(sourceInst, pool, b.OptimizedStorage(), !b.InstanceOnly(), indexFile, tarWriter)
+       logger.Debug("Adding backup index file")
+       err = backupWriteIndex(sourceInst, pool, b.OptimizedStorage(), !b.InstanceOnly(), tarWriter)
        if err != nil {
                return errors.Wrapf(err, "Error writing backup index file")
        }
@@ -176,7 +165,7 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
 }
 
 // backupWriteIndex generates an index.yaml file and then writes it to the root of the backup tarball.
-func backupWriteIndex(sourceInst instance.Instance, pool storagePools.Pool, optimized bool, snapshots bool, indexFile string, tarWriter *instancewriter.InstanceTarWriter) error {
+func backupWriteIndex(sourceInst instance.Instance, pool storagePools.Pool, optimized bool, snapshots bool, tarWriter *instancewriter.InstanceTarWriter) error {
        indexInfo := backup.Info{
                Name:             sourceInst.Name(),
                Pool:             pool.Name(),
@@ -203,21 +192,17 @@ func backupWriteIndex(sourceInst instance.Instance, pool storagePools.Pool, opti
        if err != nil {
                return err
        }
+       r := bytes.NewReader(indexData)
 
-       // Write index JSON to file.
-       err = ioutil.WriteFile(indexFile, indexData, 0644)
-       if err != nil {
-               return err
-       }
-       defer os.Remove(indexFile)
-
-       indexFileInfo, err := os.Lstat(indexFile)
-       if err != nil {
-               return err
+       indexFileInfo := instancewriter.FileInfo{
+               FileName:    "backup/index.yaml",
+               FileSize:    int64(len(indexData)),
+               FileMode:    0644,
+               FileModTime: time.Now(),
        }
 
        // Write to tarball.
-       err = tarWriter.WriteFile("backup/index.yaml", indexFile, indexFileInfo)
+       err = tarWriter.WriteFileFromReader(r, &indexFileInfo)
        if err != nil {
                return err
        }

From a5993df492627c09ed3bd10aa9381af6423f7843 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Wed, 25 Mar 2020 11:59:34 +0000
Subject: [PATCH 16/16] lxd/api/internal: d.cluster.InstanceID usage

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/api_internal.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 9ca6c1aed4..88e46b1432 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -531,7 +531,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
        }
 
        // Check if an entry for the container already exists in the db.
-       _, containerErr := d.cluster.ContainerID(projectName, req.Name)
+       _, containerErr := d.cluster.InstanceID(projectName, req.Name)
        if containerErr != nil {
                if containerErr != db.ErrNoSuchObject {
                        return response.SmartError(containerErr)