The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/4972

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Closes #4966

Signed-off-by: Stéphane Graber <[email protected]>
From da3d4565fd1d38827fb3508ffff3d081459fab36 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <[email protected]>
Date: Fri, 24 Aug 2018 00:07:01 -0400
Subject: [PATCH] lxd/backups: Make compression configurable
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #4966

Signed-off-by: Stéphane Graber <[email protected]>
---
 doc/api-extensions.md   |  4 ++++
 doc/server.md           |  1 +
 lxd/backup.go           | 49 ++++++++++++++++++++++++++++++-----------
 lxd/cluster/config.go   |  1 +
 lxd/container.go        |  9 +++++++-
 lxd/storage.go          |  2 +-
 lxd/storage_btrfs.go    | 48 +++++++++++++++++++++++++++++-----------
 lxd/storage_ceph.go     | 29 ++++++++++++++++++------
 lxd/storage_dir.go      | 26 +++++++++++++++++-----
 lxd/storage_lvm.go      | 26 +++++++++++++++++-----
 lxd/storage_mock.go     |  2 +-
 lxd/storage_zfs.go      | 45 ++++++++++++++++++++++++++++---------
 scripts/bash/lxd-client |  3 ++-
 shared/version/api.go   |  1 +
 test/suites/backup.sh   | 32 +++++++++++++--------------
 15 files changed, 202 insertions(+), 76 deletions(-)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 1a867b13fe..426b388407 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -575,3 +575,7 @@ This effectively allows for "lxc list" to get all it needs in one query.
 ## candid\_authentication
 This introduces the new candid.api.url config option and removes
 core.macaroon.endpoint.
+
+## backup\_compression
+This introduces a new backups.compression\_algorithm config key which
+allows configuration of backup compression.
diff --git a/doc/server.md b/doc/server.md
index 7216031d8e..afda09b248 100644
--- a/doc/server.md
+++ b/doc/server.md
@@ -10,6 +10,7 @@ currently supported:
 
 Key                             | Type      | Default   | API extension            | Description
 :--                             | :---      | :------   | :------------            | :----------
+backups.compression\_algorithm  | string    | gzip      | backup\_compression      | Compression algorithm to use for backups (bzip2, gzip, lzma, xz or none)
 candid.api.url                  | string    | -         | candid\_authentication   | URL of the external authentication endpoint using Candid
 cluster.offline\_threshold      | integer   | 20        | clustering               | Number of seconds after which an unresponsive node is considered offline
 core.debug\_address             | string    | -         | pprof\_http              | Address to bind the pprof debug server to (HTTP)
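
For anyone wanting to try the new key, a rough usage sketch (the container name
c1 and the target paths are placeholders; gzip stays the default, which matches
the .tar.gz names used in the updated tests):

    # Pick a backup compression algorithm (bzip2, gzip, lzma, xz or none)
    lxc config set backups.compression_algorithm xz
    lxc export c1 ./c1.tar.xz --container-only

    # Disable compression entirely
    lxc config set backups.compression_algorithm none
    lxc export c1 ./c1.tar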
diff --git a/lxd/backup.go b/lxd/backup.go
index d17b256a64..f313b7cd7e 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -12,6 +12,7 @@ import (
 
        "gopkg.in/yaml.v2"
 
+       "github.com/lxc/lxd/lxd/cluster"
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/shared"
@@ -176,7 +177,6 @@ func (b *backup) Render() *api.ContainerBackup {
 }
 
 func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
-       var buf bytes.Buffer
        var tr *tar.Reader
        result := backupInfo{}
        hasBinaryFormat := false
@@ -184,13 +184,29 @@ func backupGetInfo(r io.ReadSeeker) (*backupInfo, error) {
 
        // Extract
        r.Seek(0, 0)
-
-       err := shared.RunCommandWithFds(r, &buf, "xz", "-d")
+       _, _, unpacker, err := shared.DetectCompressionFile(r)
        if err != nil {
                return nil, err
        }
+       r.Seek(0, 0)
+
+       if unpacker == nil {
+               return nil, fmt.Errorf("Unsupported backup compression")
+       }
+
+       if len(unpacker) > 0 {
+               var buf bytes.Buffer
+
+               err := shared.RunCommandWithFds(r, &buf, unpacker[0], unpacker[1:]...)
+               if err != nil {
+                       return nil, err
+               }
+
+               tr = tar.NewReader(&buf)
+       } else {
+               tr = tar.NewReader(r)
+       }
 
-       tr = tar.NewReader(&buf)
        for {
                hdr, err := tr.Next()
                if err == io.EOF {
@@ -288,7 +304,7 @@ func backupFixStoragePool(c *db.Cluster, b backupInfo) error {
        return nil
 }
 
-func backupCreateTarball(path string, backup backup) error {
+func backupCreateTarball(s *state.State, path string, backup backup) error {
        container := backup.container
 
        // Create the index
@@ -351,19 +367,26 @@ func backupCreateTarball(path string, backup backup) error {
        }
 
        // Compress it
-       compressedPath, err := compressFile(backupPath, "xz")
+       compress, err := cluster.ConfigGetString(s.Cluster, "backups.compression_algorithm")
        if err != nil {
                return err
        }
 
-       err = os.Remove(backupPath)
-       if err != nil {
-               return err
-       }
+       if compress != "none" {
+               compressedPath, err := compressFile(backupPath, compress)
+               if err != nil {
+                       return err
+               }
 
-       err = os.Rename(compressedPath, backupPath)
-       if err != nil {
-               return err
+               err = os.Remove(backupPath)
+               if err != nil {
+                       return err
+               }
+
+               err = os.Rename(compressedPath, backupPath)
+               if err != nil {
+                       return err
+               }
        }
 
        // Set permissions
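
The net effect of the backup.go changes above: backupGetInfo() now sniffs the
archive via shared.DetectCompressionFile and only pipes it through an unpacker
when one is needed, while backupCreateTarball() compresses with the configured
algorithm unless it is "none". A rough command-line equivalent of the index
lookup, assuming GNU tar and placeholder file names:

    # Compressed backup: decompress first, then read the index from the tar stream
    gzip -dc ./c1.tar.gz | tar -xOf - backup/index.yaml

    # Uncompressed backup ("none"): the empty unpacker list means the tar is read directly
    tar -xOf ./c1.tar backup/index.yaml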
diff --git a/lxd/cluster/config.go b/lxd/cluster/config.go
index f41dceb3d3..0b12b46ac4 100644
--- a/lxd/cluster/config.go
+++ b/lxd/cluster/config.go
@@ -201,6 +201,7 @@ func configGet(cluster *db.Cluster) (*Config, error) {
 
 // ConfigSchema defines available server configuration keys.
 var ConfigSchema = config.Schema{
+       "backups.compression_algorithm":  {Default: "gzip", Validator: validateCompression},
        "cluster.offline_threshold":      {Type: config.Int64, Default: offlineThresholdDefault(), Validator: offlineThresholdValidator},
        "core.https_allowed_headers":     {},
        "core.https_allowed_methods":     {},
diff --git a/lxd/container.go b/lxd/container.go
index 40956b360c..a6136eb16b 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -707,8 +707,15 @@ func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeek
                fixBackupFile = true
        }
 
+       // Find the compression algorithm
+       tarArgs, _, _, err := shared.DetectCompressionFile(data)
+       if err != nil {
+               return err
+       }
+       data.Seek(0, 0)
+
        // Unpack tarball
-       err := pool.ContainerBackupLoad(info, data)
+       err = pool.ContainerBackupLoad(info, data, tarArgs)
        if err != nil {
                return err
        }
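
With the detection moved into containerCreateFromBackup(), import no longer
assumes xz; any of the supported algorithms (or no compression at all) should
load, e.g. (file names are placeholders):

    lxc import ./c1.tar.gz
    lxc import ./c1.tar.xz
    lxc import ./c1.tar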
diff --git a/lxd/storage.go b/lxd/storage.go
index 6d6b98baac..1785ce5a3a 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -188,7 +188,7 @@ type storage interface {
        ContainerSnapshotStop(c container) (bool, error)
 
        ContainerBackupCreate(backup backup, sourceContainer container) error
-       ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error
+       ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error
 
        // For use in migrating snapshots.
        ContainerSnapshotCreateEmpty(c container) error
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index a39b4cb21f..1559321483 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -1692,7 +1692,7 @@ func (s *storageBtrfs) ContainerBackupCreate(backup backup, source container) er
        }
 
        // Pack the backup
-       err = backupCreateTarball(tmpPath, backup)
+       err = backupCreateTarball(s.s, tmpPath, backup)
        if err != nil {
                return err
        }
@@ -1700,7 +1700,7 @@ func (s *storageBtrfs) ContainerBackupCreate(backup backup, source container) er
        return nil
 }
 
-func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker) error {
+func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        containerName, _, _ := containerGetParentAndSnapshotName(info.Name)
 
        containerMntPoint := getContainerMountPoint(s.pool.Name, "")
@@ -1721,10 +1721,16 @@ func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.R
                return err
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=1",
+               "-C", unpackPath, "backup",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-               "--strip-components=1", "-C", unpackPath, "backup")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", 
"backup", unpackPath, err)
                return err
@@ -1792,7 +1798,7 @@ func (s *storageBtrfs) doContainerBackupLoadOptimized(info backupInfo, data io.R
        return nil
 }
 
-func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker) error {
+func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        // create the main container
        err := s.doContainerCreate(info.Name, info.Privileged)
        if err != nil {
@@ -1802,11 +1808,20 @@ func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.Rea
        containerMntPoint := getContainerMountPoint(s.pool.Name, info.Name)
        // Extract container
        for _, snap := range info.Snapshots {
-               // Extract snapshots
                cur := fmt.Sprintf("backup/snapshots/%s", snap)
+
+               // Prepare tar arguments
+               args := append(tarArgs, []string{
+                       "-",
+                       "--recursive-unlink",
+                       "--xattrs-include=*",
+                       "--strip-components=3",
+                       "-C", containerMntPoint, cur,
+               }...)
+
+               // Extract snapshots
                data.Seek(0, 0)
-               err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-                       "--recursive-unlink", "--xattrs-include=*", "--strip-components=3", "-C", containerMntPoint, cur)
+               err = shared.RunCommandWithFds(data, nil, "tar", args...)
                if err != nil {
                        logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", 
cur, containerMntPoint, err)
                        return err
@@ -1819,10 +1834,17 @@ func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.Rea
                }
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=2",
+               "--xattrs-include=*",
+               "-C", containerMntPoint, "backup/container",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-               "--strip-components=2", "--xattrs-include=*", "-C", containerMntPoint, "backup/container")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                logger.Errorf("Failed to untar \"backup/container\" into 
\"%s\": %s", containerMntPoint, err)
                return err
@@ -1831,14 +1853,14 @@ func (s *storageBtrfs) doContainerBackupLoadVanilla(info backupInfo, data io.Rea
        return nil
 }
 
-func (s *storageBtrfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageBtrfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        logger.Debugf("Loading BTRFS storage volume for backup \"%s\" on 
storage pool \"%s\"", info.Name, s.pool.Name)
 
        if info.HasBinaryFormat {
-               return s.doContainerBackupLoadOptimized(info, data)
+               return s.doContainerBackupLoadOptimized(info, data, tarArgs)
        }
 
-       return s.doContainerBackupLoadVanilla(info, data)
+       return s.doContainerBackupLoadVanilla(info, data, tarArgs)
 }
 
 func (s *storageBtrfs) ImageCreate(fingerprint string) error {
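
All the storage backends follow the same pattern as the btrfs code above:
prepend the detected decompression flags (tarArgs) to the fixed extraction
arguments and stream the backup into tar on stdin. Roughly what the assembled
call looks like for a gzip-compressed vanilla backup, assuming the detected
flags are "-zxf" and using placeholder paths:

    tar -zxf - --strip-components=2 --xattrs-include='*' \
        -C /var/lib/lxd/storage-pools/default/containers/c1 backup/container < ./c1.tar.gz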
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 2dd3ccd0af..f34ca55409 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -1918,7 +1918,7 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source container) err
        }
 
        // Pack the backup
-       err = backupCreateTarball(tmpPath, backup)
+       err = backupCreateTarball(s.s, tmpPath, backup)
        if err != nil {
                return err
        }
@@ -1932,7 +1932,7 @@ func (s *storageCeph) ContainerBackupCreate(backup backup, source container) err
 // - for each snapshot dump the contents into the empty storage volume and
 //   after each dump take a snapshot of the rbd storage volume
 // - dump the container contents into the rbd storage volume.
-func (s *storageCeph) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageCeph) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        // create the main container
        err := s.doContainerCreate(info.Name, info.Privileged)
        if err != nil {
@@ -1948,12 +1948,20 @@ func (s *storageCeph) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) e
        containerMntPoint := getContainerMountPoint(s.pool.Name, info.Name)
        // Extract container
        for _, snap := range info.Snapshots {
-               // Extract snapshots
                cur := fmt.Sprintf("backup/snapshots/%s", snap)
 
+               // Prepare tar arguments
+               args := append(tarArgs, []string{
+                       "-",
+                       "--recursive-unlink",
+                       "--strip-components=3",
+                       "--xattrs-include=*",
+                       "-C", containerMntPoint, cur,
+               }...)
+
+               // Extract snapshots
                data.Seek(0, 0)
-               err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-                       "--recursive-unlink", "--strip-components=3", "--xattrs-include=*", "-C", containerMntPoint, cur)
+               err = shared.RunCommandWithFds(data, nil, "tar", args...)
                if err != nil {
                        logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", 
cur, containerMntPoint, err)
                        return err
@@ -1979,10 +1987,17 @@ func (s *storageCeph) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) e
                }
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=2",
+               "--xattrs-include=*",
+               "-C", containerMntPoint, "backup/container",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-               "--strip-components=2", "--xattrs-include=*", "-C", containerMntPoint, "backup/container")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                logger.Errorf("Failed to untar \"backup/container\" into 
\"%s\": %s", containerMntPoint, err)
                return err
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index ba63a7597d..0e2fa6efae 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -1113,7 +1113,7 @@ func (s *storageDir) ContainerBackupCreate(backup backup, source container) erro
        }
 
        // Pack the backup
-       err = backupCreateTarball(tmpPath, backup)
+       err = backupCreateTarball(s.s, tmpPath, backup)
        if err != nil {
                return err
        }
@@ -1121,7 +1121,7 @@ func (s *storageDir) ContainerBackupCreate(backup backup, source container) erro
        return nil
 }
 
-func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        _, err := s.StoragePoolMount()
        if err != nil {
                return err
@@ -1140,10 +1140,17 @@ func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) er
                return err
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=2",
+               "--xattrs-include=*",
+               "-C", containerMntPoint, "backup/container",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf",
-               "-", "--strip-components=2", "--xattrs-include=*", "-C", containerMntPoint, "backup/container")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                return err
        }
@@ -1160,10 +1167,17 @@ func (s *storageDir) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) er
                        return err
                }
 
+               // Prepare tar arguments
+               args := append(tarArgs, []string{
+                       "-",
+                       "--strip-components=2",
+                       "--xattrs-include=*",
+                       "-C", containerMntPoint, "backup/snapshots",
+               }...)
+
                // Extract snapshots
                data.Seek(0, 0)
-               err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-                       "--strip-components=2", "--xattrs-include=*", "-C", snapshotMntPoint, "backup/snapshots")
+               err = shared.RunCommandWithFds(data, nil, "tar", args...)
                if err != nil {
                        return err
                }
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 036761210e..18e986b0db 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -1679,7 +1679,7 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, source container) erro
        }
 
        // Pack the backup
-       err = backupCreateTarball(tmpPath, backup)
+       err = backupCreateTarball(s.s, tmpPath, backup)
        if err != nil {
                return err
        }
@@ -1687,16 +1687,23 @@ func (s *storageLvm) ContainerBackupCreate(backup backup, source container) erro
        return nil
 }
 
-func (s *storageLvm) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageLvm) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        containerPath, err := s.doContainerBackupLoad(info.Name, info.Privileged, false)
        if err != nil {
                return err
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=2",
+               "--xattrs-include=*",
+               "-C", containerPath, "backup/container",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-", "--strip-components=2", "--xattrs-include=*",
-               "-C", containerPath, "backup/container")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                return err
        }
@@ -1708,10 +1715,17 @@ func (s *storageLvm) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) er
                        return err
                }
 
+               // Prepare tar arguments
+               args := append(tarArgs, []string{
+                       "-",
+                       "--strip-components=3",
+                       "--xattrs-include=*",
+                       "-C", containerPath, fmt.Sprintf("backup/snapshots/%s", snap),
+               }...)
+
                // Extract snapshots
                data.Seek(0, 0)
-               err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-                       "--strip-components=3", "--xattrs-include=*", "-C", containerPath, fmt.Sprintf("backup/snapshots/%s", snap))
+               err = shared.RunCommandWithFds(data, nil, "tar", args...)
                if err != nil {
                        return err
                }
diff --git a/lxd/storage_mock.go b/lxd/storage_mock.go
index 201ea1d065..11e60b1212 100644
--- a/lxd/storage_mock.go
+++ b/lxd/storage_mock.go
@@ -194,7 +194,7 @@ func (s *storageMock) ContainerBackupCreate(backup backup, sourceContainer conta
        return nil
 }
 
-func (s *storageMock) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageMock) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        return nil
 }
 
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index d96eaae42e..d7cf6829f9 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -2044,7 +2044,7 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source container) erro
        }
 
        // Pack the backup
-       err = backupCreateTarball(tmpPath, backup)
+       err = backupCreateTarball(s.s, tmpPath, backup)
        if err != nil {
                return err
        }
@@ -2052,7 +2052,7 @@ func (s *storageZfs) ContainerBackupCreate(backup backup, source container) erro
        return nil
 }
 
-func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker) error {
+func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        containerName, _, _ := containerGetParentAndSnapshotName(info.Name)
        containerMntPoint := getContainerMountPoint(s.pool.Name, containerName)
        err := createContainerMountpoint(containerMntPoint, containerPath(info.Name, false), info.Privileged)
@@ -2073,9 +2073,16 @@ func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.Rea
                return err
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=1",
+               "-C", unpackPath, "backup",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-", "--strip-components=1", "-C", unpackPath, "backup")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                // can't use defer because it needs to run before the mount
                os.RemoveAll(unpackPath)
@@ -2155,7 +2162,7 @@ func (s *storageZfs) doContainerBackupLoadOptimized(info backupInfo, data io.Rea
        return nil
 }
 
-func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker) error {
+func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        // create the main container
        err := s.doContainerCreate(info.Name, info.Privileged)
        if err != nil {
@@ -2174,9 +2181,18 @@ func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadS
                // Extract snapshots
                cur := fmt.Sprintf("backup/snapshots/%s", snap)
 
+               // Prepare tar arguments
+               args := append(tarArgs, []string{
+                       "-",
+                       "--recursive-unlink",
+                       "--strip-components=3",
+                       "--xattrs-include=*",
+                       "-C", containerMntPoint, cur,
+               }...)
+
+               // Unpack
                data.Seek(0, 0)
-               err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-                       "--recursive-unlink", "--strip-components=3", "--xattrs-include=*", "-C", containerMntPoint, cur)
+               err = shared.RunCommandWithFds(data, nil, "tar", args...)
                if err != nil {
                        logger.Errorf("Failed to untar \"%s\" into \"%s\": %s", 
cur, containerMntPoint, err)
                        return err
@@ -2189,10 +2205,17 @@ func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadS
                }
        }
 
+       // Prepare tar arguments
+       args := append(tarArgs, []string{
+               "-",
+               "--strip-components=2",
+               "--xattrs-include=*",
+               "-C", containerMntPoint, "backup/container",
+       }...)
+
        // Extract container
        data.Seek(0, 0)
-       err = shared.RunCommandWithFds(data, nil, "tar", "-xJf", "-",
-               "--strip-components=2", "--xattrs-include=*", "-C", containerMntPoint, "backup/container")
+       err = shared.RunCommandWithFds(data, nil, "tar", args...)
        if err != nil {
                logger.Errorf("Failed to untar \"backup/container\" into 
\"%s\": %s", containerMntPoint, err)
                return err
@@ -2201,14 +2224,14 @@ func (s *storageZfs) doContainerBackupLoadVanilla(info backupInfo, data io.ReadS
        return nil
 }
 
-func (s *storageZfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker) error {
+func (s *storageZfs) ContainerBackupLoad(info backupInfo, data io.ReadSeeker, tarArgs []string) error {
        logger.Debugf("Loading ZFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
 
        if info.HasBinaryFormat {
-               return s.doContainerBackupLoadOptimized(info, data)
+               return s.doContainerBackupLoadOptimized(info, data, tarArgs)
        }
 
-       return s.doContainerBackupLoadVanilla(info, data)
+       return s.doContainerBackupLoadVanilla(info, data, tarArgs)
 }
 
 // - create temporary directory ${LXD_DIR}/images/lxd_images_
diff --git a/scripts/bash/lxd-client b/scripts/bash/lxd-client
index 0657c74bcf..8003717efc 100644
--- a/scripts/bash/lxd-client
+++ b/scripts/bash/lxd-client
@@ -64,7 +64,8 @@ _have lxc && {
       list manpage monitor move network profile publish query remote rename \
       restart restore shell snapshot start stop storage version"
 
-    global_keys="core.https_address core.https_allowed_credentials \
+    global_keys="backups.compression_algorithm \
+      core.https_address core.https_allowed_credentials \
       core.https_allowed_headers core.https_allowed_methods \
       core.https_allowed_origin candid.api.url core.proxy_https \
       core.proxy_http core.proxy_ignore_hosts core.trust_password \
diff --git a/shared/version/api.go b/shared/version/api.go
index a81f75099e..0ac1c116c7 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -121,6 +121,7 @@ var APIExtensions = []string{
        "network_nat_order",
        "container_full",
        "candid_authentication",
+       "backup_compression",
 }
 
 // APIExtensionsCount returns the number of available API extensions.
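
Clients can gate their behaviour on the new extension before touching the key;
a quick check against the local daemon (a rough grep, not a proper JSON parse):

    lxc query /1.0 | grep '"backup_compression"'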
diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index d11b90fdc6..3eb89f8999 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -203,20 +203,20 @@ test_backup_import() {
 
   # create backup
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc export b1 "${LXD_DIR}/c1-optimized.tar.xz" --optimized-storage --container-only
+    lxc export b1 "${LXD_DIR}/c1-optimized.tar.gz" --optimized-storage --container-only
   fi
 
-  lxc export b1 "${LXD_DIR}/c1.tar.xz" --container-only
+  lxc export b1 "${LXD_DIR}/c1.tar.gz" --container-only
   lxc delete --force b1
 
   # import backup, and ensure it's valid and runnable
-  lxc import "${LXD_DIR}/c1.tar.xz"
+  lxc import "${LXD_DIR}/c1.tar.gz"
   lxc info b1
   lxc start b1
   lxc delete --force b1
 
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc import "${LXD_DIR}/c1-optimized.tar.xz"
+    lxc import "${LXD_DIR}/c1-optimized.tar.gz"
     lxc info b1
     lxc start b1
     lxc delete --force b1
@@ -225,13 +225,13 @@ test_backup_import() {
   # with snapshots
 
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc export b2 "${LXD_DIR}/c2-optimized.tar.xz" --optimized-storage
+    lxc export b2 "${LXD_DIR}/c2-optimized.tar.gz" --optimized-storage
   fi
 
-  lxc export b2 "${LXD_DIR}/c2.tar.xz"
+  lxc export b2 "${LXD_DIR}/c2.tar.gz"
   lxc delete --force b2
 
-  lxc import "${LXD_DIR}/c2.tar.xz"
+  lxc import "${LXD_DIR}/c2.tar.gz"
   lxc info b2 | grep snap0
   lxc start b2
   lxc stop b2 --force
@@ -241,7 +241,7 @@ test_backup_import() {
   lxc delete --force b2
 
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc import "${LXD_DIR}/c2-optimized.tar.xz"
+    lxc import "${LXD_DIR}/c2-optimized.tar.gz"
     lxc info b2 | grep snap0
     lxc start b2
     lxc stop b2 --force
@@ -265,16 +265,16 @@ test_backup_export() {
   # container only
 
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc export b1 "${LXD_DIR}/c1-optimized.tar.xz" --optimized-storage --container-only
-    tar -xJf "${LXD_DIR}/c1-optimized.tar.xz" -C "${LXD_DIR}/optimized"
+    lxc export b1 "${LXD_DIR}/c1-optimized.tar.gz" --optimized-storage --container-only
+    tar -xzf "${LXD_DIR}/c1-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
     [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
     [ -f "${LXD_DIR}/optimized/backup/container.bin" ]
     [ ! -d "${LXD_DIR}/optimized/backup/snapshots" ]
   fi
 
-  lxc export b1 "${LXD_DIR}/c1.tar.xz" --container-only
-  tar -xJf "${LXD_DIR}/c1.tar.xz" -C "${LXD_DIR}/non-optimized"
+  lxc export b1 "${LXD_DIR}/c1.tar.gz" --container-only
+  tar -xzf "${LXD_DIR}/c1.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # check tarball content
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]
@@ -286,16 +286,16 @@ test_backup_export() {
   # with snapshots
 
   if [ "$lxd_backend" = "btrfs" ] || [ "$lxd_backend" = "zfs" ]; then
-    lxc export b1 "${LXD_DIR}/c2-optimized.tar.xz" --optimized-storage
-    tar -xJf "${LXD_DIR}/c2-optimized.tar.xz" -C "${LXD_DIR}/optimized"
+    lxc export b1 "${LXD_DIR}/c2-optimized.tar.gz" --optimized-storage
+    tar -xzf "${LXD_DIR}/c2-optimized.tar.gz" -C "${LXD_DIR}/optimized"
 
     [ -f "${LXD_DIR}/optimized/backup/index.yaml" ]
     [ -f "${LXD_DIR}/optimized/backup/container.bin" ]
     [ -f "${LXD_DIR}/optimized/backup/snapshots/snap0.bin" ]
   fi
 
-  lxc export b1 "${LXD_DIR}/c2.tar.xz"
-  tar -xJf "${LXD_DIR}/c2.tar.xz" -C "${LXD_DIR}/non-optimized"
+  lxc export b1 "${LXD_DIR}/c2.tar.gz"
+  tar -xzf "${LXD_DIR}/c2.tar.gz" -C "${LXD_DIR}/non-optimized"
 
   # check tarball content
   [ -f "${LXD_DIR}/non-optimized/backup/index.yaml" ]