The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6618

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
More storage API fixes/improvements ahead of merging btrfs.
From 169c153f0339ebe4b8cb832880a4213207a33947 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Fri, 29 Nov 2019 08:30:30 +0100
Subject: [PATCH 1/7] lxd: Mark container snapshots as such

This ensures that snapshots are marked correctly as such in the
database.

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
---
 lxd/container_lxc.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 9de0a935bf..3f240befd8 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -232,7 +232,7 @@ func containerLXCCreate(s *state.State, args db.InstanceArgs) (instance.Instance
        }
 
        // Create a new database entry for the container's storage volume
-       _, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", storagePoolVolumeTypeContainer, false, poolID, volumeConfig)
+       _, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", storagePoolVolumeTypeContainer, c.IsSnapshot(), poolID, volumeConfig)
        if err != nil {
                c.Delete()
                return nil, err

From 8f9a4a67a6c391788929f1a00a8cf6e1c6ad2c9f Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Tue, 19 Nov 2019 18:08:48 +0100
Subject: [PATCH 2/7] lxd/storage/locking: New storage locking package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This moves and exports the locking logic previously kept within the
drivers package and updates the existing code to make use of it.

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/storage/drivers/lock.go   | 50 ---------------------------
 lxd/storage/drivers/volume.go | 12 +++----
 lxd/storage/locking/lock.go   | 65 ++++++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 57 deletions(-)
 delete mode 100644 lxd/storage/drivers/lock.go
 create mode 100644 lxd/storage/locking/lock.go

diff --git a/lxd/storage/drivers/lock.go b/lxd/storage/drivers/lock.go
deleted file mode 100644
index b0f27bec63..0000000000
--- a/lxd/storage/drivers/lock.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package drivers
-
-import (
-       "sync"
-
-       "github.com/lxc/lxd/shared/logger"
-)
-
-// lxdStorageOngoingOperationMap is a hashmap that allows functions to check
-// whether the operation they are about to perform is already in progress. If
-// it is, the channel can be used to wait for the operation to finish. If it
-// is not, the function that wants to perform the operation should store its
-// code in the hashmap.
-// Note that any access to this map must be done while holding a lock.
-var lxdStorageOngoingOperationMap = map[string]chan bool{}
-
-// lxdStorageMapLock is used to access lxdStorageOngoingOperationMap.
-var lxdStorageMapLock sync.Mutex
-
-func lock(lockID string) func() {
-       lxdStorageMapLock.Lock()
-
-       if waitChannel, ok := lxdStorageOngoingOperationMap[lockID]; ok {
-               lxdStorageMapLock.Unlock()
-
-               _, ok := <-waitChannel
-               if ok {
-                       logger.Warnf("Received value over semaphore, this should not have happened")
-               }
-
-               // Give the benefit of the doubt and assume that the other
-               // thread actually succeeded in mounting the storage pool.
-               return nil
-       }
-
-       lxdStorageOngoingOperationMap[lockID] = make(chan bool)
-       lxdStorageMapLock.Unlock()
-
-       return func() {
-               lxdStorageMapLock.Lock()
-
-               waitChannel, ok := lxdStorageOngoingOperationMap[lockID]
-               if ok {
-                       close(waitChannel)
-                       delete(lxdStorageOngoingOperationMap, lockID)
-               }
-
-               lxdStorageMapLock.Unlock()
-       }
-}
diff --git a/lxd/storage/drivers/volume.go b/lxd/storage/drivers/volume.go
index 9bfb687a44..1f0df96278 100644
--- a/lxd/storage/drivers/volume.go
+++ b/lxd/storage/drivers/volume.go
@@ -5,6 +5,7 @@ import (
        "os"
 
        "github.com/lxc/lxd/lxd/operations"
+       "github.com/lxc/lxd/lxd/storage/locking"
        "github.com/lxc/lxd/shared"
 )
 
@@ -107,13 +108,10 @@ func (v Volume) CreateMountPath() error {
 func (v Volume) MountTask(task func(mountPath string, op *operations.Operation) error, op *operations.Operation) error {
        parentName, snapName, isSnap := shared.InstanceGetParentAndSnapshotName(v.name)
 
-       mountLockID := fmt.Sprintf("mount/%s/%s/%s", v.pool, v.volType, v.name)
-       umountLockID := fmt.Sprintf("umount/%s/%s/%s", v.pool, v.volType, v.name)
-
        // If the volume is a snapshot then call the snapshot specific mount/unmount functions as
        // these will mount the snapshot read only.
        if isSnap {
-               unlock := lock(mountLockID)
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourMount, err := v.driver.MountVolumeSnapshot(v.volType, parentName, snapName, op)
                if err != nil {
@@ -125,13 +123,13 @@ func (v Volume) MountTask(task func(mountPath string, op *operations.Operation)
 
                if ourMount {
                        defer func() {
-                               unlock := lock(umountLockID)
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.UnmountVolumeSnapshot(v.volType, parentName, snapName, op)
                                unlock()
                        }()
                }
        } else {
-               unlock := lock(mountLockID)
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourMount, err := v.driver.MountVolume(v.volType, v.name, op)
                if err != nil {
@@ -143,7 +141,7 @@ func (v Volume) MountTask(task func(mountPath string, op *operations.Operation)
 
                if ourMount {
                        defer func() {
-                               unlock := lock(umountLockID)
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.UnmountVolume(v.volType, v.name, op)
                                unlock()
                        }()
diff --git a/lxd/storage/locking/lock.go b/lxd/storage/locking/lock.go
new file mode 100644
index 0000000000..bafdcecddb
--- /dev/null
+++ b/lxd/storage/locking/lock.go
@@ -0,0 +1,65 @@
+package locking
+
+import (
+       "fmt"
+       "sync"
+)
+
+// ongoingOperationMap is a hashmap that allows functions to check whether the
+// operation they are about to perform is already in progress. If it is, the
+// channel can be used to wait for the operation to finish. If it is not, the
+// function that wants to perform the operation should store its code in the
+// hashmap.
+// Note that any access to this map must be done while holding a lock.
+var ongoingOperationMap = map[string]chan struct{}{}
+
+// ongoingOperationMapLock is used to access ongoingOperationMap.
+var ongoingOperationMapLock sync.Mutex
+
+// Lock creates a lock for a specific storage volume to allow activities that
+// require exclusive access to take place. Will block until the lock is
+// established. On success, it returns an unlock function which needs to be
+// called to unlock the lock.
+func Lock(poolName string, volType string, volName string) func() {
+       lockID := fmt.Sprintf("%s/%s/%s", poolName, volType, volName)
+
+       for {
+               // Get exclusive access to the map and see if there is already an operation ongoing.
+               ongoingOperationMapLock.Lock()
+               waitCh, ok := ongoingOperationMap[lockID]
+
+               if !ok {
+                       // No ongoing operation, create a new channel under the map lock to indicate our new operation.
+                       waitCh = make(chan struct{})
+                       ongoingOperationMap[lockID] = waitCh
+                       ongoingOperationMapLock.Unlock()
+
+                       // Return a function that will complete the operation.
+                       return func() {
+                               // Get exclusive access to the map.
+                               ongoingOperationMapLock.Lock()
+                               doneCh, ok := ongoingOperationMap[lockID]
+
+                               // Load our existing operation.
+                               if ok {
+                                       // Close the channel to indicate to other waiting users
+                                       // they can now try again to create a new operation.
+                                       close(doneCh)
+
+                                       // Remove our existing operation entry from the map.
+                                       delete(ongoingOperationMap, lockID)
+                               }
+
+                               // Release the lock now that the done channel is closed and the
+                               // map entry has been deleted, this will allow any waiting users
+                               // to try and get access to the map to create a new operation.
+                               ongoingOperationMapLock.Unlock()
+                       }
+               }
+
+               // An existing operation is ongoing; release the map lock, wait for the
+               // operation to finish, then try to create a new operation again.
+               ongoingOperationMapLock.Unlock()
+               <-waitCh
+       }
+}
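
As a usage sketch (not part of this patch; the pool, volume type, and volume
name are made up), two concurrent callers contending for the same volume
serialise like this:

    package main

    import (
            "fmt"
            "sync"

            "github.com/lxc/lxd/lxd/storage/locking"
    )

    func main() {
            var wg sync.WaitGroup

            // The second goroutine blocks inside locking.Lock until the first
            // one runs the unlock function it was returned.
            for i := 0; i < 2; i++ {
                    wg.Add(1)
                    go func(n int) {
                            defer wg.Done()

                            unlock := locking.Lock("default", "containers", "c1")
                            defer unlock()

                            fmt.Printf("worker %d holds default/containers/c1\n", n)
                    }(i)
            }

            wg.Wait()
    }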

From 6044eb14bee880be75cff2f67ea8b907d17d4f25 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Tue, 19 Nov 2019 18:10:30 +0100
Subject: [PATCH 3/7] lxd/storage: Lock image creation

This adds a lock around image creation to ensure that the same optimized
image volume is not created multiple times concurrently.

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
---
 lxd/storage/backend_lxd.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 7d3f265458..14d213a479 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -17,6 +17,7 @@ import (
        "github.com/lxc/lxd/lxd/project"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/lxd/storage/drivers"
+       "github.com/lxc/lxd/lxd/storage/locking"
        "github.com/lxc/lxd/lxd/storage/memorypipe"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/api"
@@ -1569,6 +1570,11 @@ func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) e
                return nil // Nothing to do for drivers that don't support optimized image volumes.
        }
 
+       // We need to lock this operation to ensure that the image is not being
+       // created multiple times.
+       unlock := locking.Lock(b.name, string(drivers.VolumeTypeImage), fingerprint)
+       defer unlock()
+
        // Check if we already have a suitable volume.
        if b.driver.HasVolume(drivers.VolumeTypeImage, fingerprint) {
                return nil
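
Taking the lock before the existence check is what makes EnsureImage safe to
run concurrently: a second caller blocks in locking.Lock and, once it acquires
the lock, finds the volume already present and returns early. A condensed
sketch of the resulting pattern (identifiers as in the hunk above; the
creation step itself is elided):

    unlock := locking.Lock(b.name, string(drivers.VolumeTypeImage), fingerprint)
    defer unlock()

    // Re-check under the lock: a concurrent caller may have created the
    // volume while we were waiting.
    if b.driver.HasVolume(drivers.VolumeTypeImage, fingerprint) {
            return nil
    }

    // ... create and fill the optimized image volume ...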

From 9875a6404337c6b2a37bc6ec4904483aec820ed4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com>
Date: Thu, 12 Dec 2019 20:55:32 -0500
Subject: [PATCH 4/7] lxd/backup: Rename HasBinaryFormat to OptimizedStorage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This lines it up with the API and index file.

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/backup/backup.go       | 8 ++++----
 lxd/containers_post.go     | 2 +-
 lxd/storage/backend_lxd.go | 2 +-
 lxd/storage_zfs.go         | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/lxd/backup/backup.go b/lxd/backup/backup.go
index c50ad7697a..cc432d7541 100644
--- a/lxd/backup/backup.go
+++ b/lxd/backup/backup.go
@@ -31,14 +31,14 @@ type Info struct {
        Privileged      bool     `json:"privileged" yaml:"privileged"`
        Pool            string   `json:"pool" yaml:"pool"`
        Snapshots       []string `json:"snapshots,omitempty" yaml:"snapshots,omitempty"`
-       HasBinaryFormat bool     `json:"-" yaml:"-"`
+       OptimizedStorage bool     `json:"-" yaml:"-"`
 }
 
 // GetInfo extracts backup information from a given ReadSeeker.
 func GetInfo(r io.ReadSeeker) (*Info, error) {
        var tr *tar.Reader
        result := Info{}
-       hasBinaryFormat := false
+       optimizedStorage := false
        hasIndexFile := false
 
        // Extract
@@ -93,7 +93,7 @@ func GetInfo(r io.ReadSeeker) (*Info, error) {
                }
 
                if hdr.Name == "backup/container.bin" {
-                       hasBinaryFormat = true
+                       optimizedStorage = true
                }
        }
 
@@ -101,7 +101,7 @@ func GetInfo(r io.ReadSeeker) (*Info, error) {
                return nil, fmt.Errorf("Backup is missing index.yaml")
        }
 
-       result.HasBinaryFormat = hasBinaryFormat
+       result.OptimizedStorage = optimizedStorage
        return &result, nil
 }
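
As a usage sketch (the path is made up, and this assumes the reader is
positioned at the start of the backup tarball), a caller would consume the
renamed field like this:

    f, err := os.Open("/tmp/c3-backup.tar")
    if err != nil {
            return err
    }
    defer f.Close()

    info, err := backup.GetInfo(f)
    if err != nil {
            return err
    }

    if info.OptimizedStorage {
            // The tarball contains backup/container.bin, so restore it via
            // the driver's optimized (binary) format.
    }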
 
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index ccb5a4aab6..1ba3baef48 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -658,7 +658,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
                // The storage pool doesn't exist. If backup is in binary format (so we cannot alter
                // the backup.yaml) or the pool has been specified directly from the user restoring
                // the backup then we cannot proceed so return an error.
-               if bInfo.HasBinaryFormat || pool != "" {
+               if bInfo.OptimizedStorage || pool != "" {
                        return response.InternalError(errors.Wrap(err, "Storage pool not found"))
                }
 
diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 14d213a479..de2cfe57b2 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -394,7 +394,7 @@ func (b *lxdBackend) CreateInstance(inst instance.Instance, op *operations.Opera
 // created in the database to run any storage layer finalisations, and a revert hook that can be
 // run if the instance database load process fails that will remove anything created thus far.
 func (b *lxdBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.ReadSeeker, op *operations.Operation) (func(instance.Instance) error, func(), error) {
-       logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "instance": srcBackup.Name, "snapshots": srcBackup.Snapshots, "hasBinaryFormat": srcBackup.HasBinaryFormat})
+       logger := logging.AddContext(b.logger, log.Ctx{"project": srcBackup.Project, "instance": srcBackup.Name, "snapshots": srcBackup.Snapshots, "optimizedStorage": srcBackup.OptimizedStorage})
        logger.Debug("CreateInstanceFromBackup started")
        defer logger.Debug("CreateInstanceFromBackup finished")
 
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 12c6776022..3231dc85c3 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -2310,7 +2310,7 @@ func (s *storageZfs) doContainerBackupLoadVanilla(info backup.Info, data io.Read
 func (s *storageZfs) ContainerBackupLoad(info backup.Info, data io.ReadSeeker, tarArgs []string) error {
        logger.Debugf("Loading ZFS storage volume for backup \"%s\" on storage pool \"%s\"", info.Name, s.pool.Name)
 
-       if info.HasBinaryFormat {
+       if info.OptimizedStorage {
                return s.doContainerBackupLoadOptimized(info, data, tarArgs)
        }
 

From 6b2d5abb7d783119d85fa73003136d0513e413bc Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Tue, 3 Dec 2019 16:57:10 +0100
Subject: [PATCH 5/7] lxd/storage/drivers: Update RestoreBackupVolume signature

This adds the optimizedStorage argument to RestoreBackupVolume.

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
---
 lxd/storage/drivers/driver_cephfs.go | 2 +-
 lxd/storage/drivers/driver_dir.go    | 2 +-
 lxd/storage/drivers/interface.go     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lxd/storage/drivers/driver_cephfs.go b/lxd/storage/drivers/driver_cephfs.go
index fa1c759458..26e6f20e4b 100644
--- a/lxd/storage/drivers/driver_cephfs.go
+++ b/lxd/storage/drivers/driver_cephfs.go
@@ -973,7 +973,7 @@ func (d *cephfs) BackupVolume(vol Volume, targetPath string, optimized bool, sna
        return ErrNotImplemented
 }
 
-func (d *cephfs) RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
+func (d *cephfs) RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
        return nil, nil, ErrNotImplemented
 }
 
diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go
index 88de930d29..4a07ea51ce 100644
--- a/lxd/storage/drivers/driver_dir.go
+++ b/lxd/storage/drivers/driver_dir.go
@@ -1019,7 +1019,7 @@ func (d *dir) BackupVolume(vol Volume, targetPath string, _, snapshots bool, op
 }
 
 // RestoreBackupVolume restores a backup tarball onto the storage device.
-func (d *dir) RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error) {
+func (d *dir) RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
        revert := true
        revertPaths := []string{}
 
diff --git a/lxd/storage/drivers/interface.go b/lxd/storage/drivers/interface.go
index 0732ebeb69..4dd46e7669 100644
--- a/lxd/storage/drivers/interface.go
+++ b/lxd/storage/drivers/interface.go
@@ -75,5 +75,5 @@ type Driver interface {
 
        // Backup.
        BackupVolume(vol Volume, targetPath string, optimized bool, snapshots bool, op *operations.Operation) error
-       RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, op *operations.Operation) (func(vol Volume) error, func(), error)
+       RestoreBackupVolume(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error)
 }
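
For driver authors, a minimal stub satisfying the updated interface could look
like the following ("myDriver" is a made-up type; cephfs above simply returns
ErrNotImplemented):

    func (d *myDriver) RestoreBackupVolume(vol Volume, snapshots []string,
            srcData io.ReadSeeker, optimizedStorage bool,
            op *operations.Operation) (func(vol Volume) error, func(), error) {
            if optimizedStorage {
                    // Unpack the driver's binary format, e.g. the
                    // backup/container.bin payload written by optimized backups.
                    return nil, nil, ErrNotImplemented
            }

            // Fall back to a generic tarball restore of the volume and its
            // snapshots.
            return nil, nil, ErrNotImplemented
    }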

From b3bff372bdbd4fd790e9429854d89a809a1757be Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Tue, 3 Dec 2019 16:57:56 +0100
Subject: [PATCH 6/7] lxd/storage: Update call to RestoreBackupVolume

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
---
 lxd/storage/backend_lxd.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index de2cfe57b2..06696e0fb6 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -414,7 +414,7 @@ func (b *lxdBackend) CreateInstanceFromBackup(srcBackup backup.Info, srcData io.
        }()
 
        // Unpack the backup into the new storage volume(s).
-       volPostHook, revertHook, err := b.driver.RestoreBackupVolume(vol, srcBackup.Snapshots, srcData, op)
+       volPostHook, revertHook, err := b.driver.RestoreBackupVolume(vol, srcBackup.Snapshots, srcData, srcBackup.OptimizedStorage, op)
        if err != nil {
                return nil, nil, err
        }

From c812f78c749773accd42d52ba20f97b78d544693 Mon Sep 17 00:00:00 2001
From: Thomas Hipp <thomas.h...@canonical.com>
Date: Wed, 4 Dec 2019 10:58:00 +0100
Subject: [PATCH 7/7] test/suites: Satisfy shellcheck

Appending "|| false" ensures the negated commands still fail the test suite
under "set -e" (shellcheck SC2251), and running the build step in a subshell
removes the need to cd back afterwards.

Signed-off-by: Thomas Hipp <thomas.h...@canonical.com>
---
 test/suites/backup.sh |  4 ++--
 test/suites/devlxd.sh | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/test/suites/backup.sh b/test/suites/backup.sh
index 5e073fe40a..3ddac39ea3 100644
--- a/test/suites/backup.sh
+++ b/test/suites/backup.sh
@@ -270,10 +270,10 @@ test_backup_import_with_project() {
   lxc profile device remove default root
 
   # This should fail as the expected storage is not available, and there is no default
-  ! lxc import "${LXD_DIR}/c3.tar.gz"
+  ! lxc import "${LXD_DIR}/c3.tar.gz" || false
 
   # Specify pool explicitly; this should fail as the pool doesn't exist
-  ! lxc import "${LXD_DIR}/c3.tar.gz" -s pool_1
+  ! lxc import "${LXD_DIR}/c3.tar.gz" -s pool_1 || false
 
   # Specify pool explicitly
   lxc import "${LXD_DIR}/c3.tar.gz" -s pool_2
diff --git a/test/suites/devlxd.sh b/test/suites/devlxd.sh
index e4ce296eee..e50d8ee38b 100644
--- a/test/suites/devlxd.sh
+++ b/test/suites/devlxd.sh
@@ -1,11 +1,11 @@
 test_devlxd() {
   ensure_import_testimage
 
-  # shellcheck disable=SC2164
-  cd "${TEST_DIR}"
-  go build -tags netgo -a -installsuffix devlxd ../deps/devlxd-client.go
-  # shellcheck disable=SC2164
-  cd -
+  (
+    # shellcheck disable=SC2164
+    cd "${TEST_DIR}"
+    go build -tags netgo -a -installsuffix devlxd ../deps/devlxd-client.go
+  )
 
   lxc launch testimage devlxd -c security.devlxd=false
 