The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/7733

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Just a test to see whether this change introduced the LVM issues we are seeing in Jenkins.
From b2b4662c10bdf4dbbcb3ca89536b48d1e4fad98b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 6 Aug 2020 11:02:54 +0100
Subject: [PATCH 1/5] test/suites/storage: LVM size tweaks

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 test/suites/storage.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index 75eeb13cf6..6890228a4a 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -765,12 +765,12 @@ test_storage() {
 
   if [ "$lxd_backend" = "lvm" ]; then
     QUOTA1="20MB"
-    rootMinKB1="17000"
+    rootMinKB1="14000"
     rootMaxKB1="20000"
 
     # Increase quota enough to require a new 4MB LVM extent.
     QUOTA2="25MB"
-    rootMinKB2="21000"
+    rootMinKB2="19000"
     rootMaxKB2="23000"
   fi
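
As a side note (not part of the patch), here is a minimal sketch of the extent-rounding arithmetic these bounds appear to track, assuming the 4 MB LVM extent size mentioned in the comment above and MB parsed as 10^6 bytes:

package main

import "fmt"

func main() {
	// Assumption: 4 MiB LVM extents, per the "new 4MB LVM extent" comment
	// in the hunk above, with MB read as 10^6 bytes.
	const extentBytes = int64(4 * 1024 * 1024)

	// QUOTA1 (20MB) and QUOTA2 (25MB) expressed in bytes.
	for _, quotaBytes := range []int64{20 * 1000 * 1000, 25 * 1000 * 1000} {
		extents := (quotaBytes + extentBytes - 1) / extentBytes // round up to whole extents
		fmt.Printf("%d bytes -> %d extents -> %d KiB\n",
			quotaBytes, extents, extents*extentBytes/1024)
	}
}

With those assumptions, 20MB rounds up to 5 extents (20480 KiB) and 25MB to 6 extents (24576 KiB), so QUOTA2 does land on one additional extent.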
 

From b9c995d5c309dc1d17928c1a5427f2f6a47e0099 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 6 Aug 2020 14:57:43 +0100
Subject: [PATCH 2/5] Revert "lxd/storage/locking: Moves package to
 lxd/locking"

This reverts commit 81f6c3e67ea624a6f9dfab383e8c90517bbaa91c.
---
 lxd/{ => storage}/locking/lock.go | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename lxd/{ => storage}/locking/lock.go (100%)

diff --git a/lxd/locking/lock.go b/lxd/storage/locking/lock.go
similarity index 100%
rename from lxd/locking/lock.go
rename to lxd/storage/locking/lock.go

From 7d06080ff15642d7ecd2c89569a3a7c9a584c634 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 6 Aug 2020 14:57:48 +0100
Subject: [PATCH 3/5] Revert "lxd/locking: Renames variables to make them
 generic"

This reverts commit 5630989206e2e18dc2b33d1da1fcc7606e2217ed.
---
 lxd/storage/locking/lock.go | 44 +++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 19 deletions(-)

diff --git a/lxd/storage/locking/lock.go b/lxd/storage/locking/lock.go
index 02c0ec992d..727974484c 100644
--- a/lxd/storage/locking/lock.go
+++ b/lxd/storage/locking/lock.go
@@ -1,38 +1,44 @@
 package locking
 
 import (
+       "fmt"
        "sync"
 )
 
-// locks is a hashmap that allows functions to check whether the operation they are about to perform
-// is already in progress. If it is the channel can be used to wait for the operation to finish. If it is not, the
-// function that wants to perform the operation should store its code in the hashmap.
+// ongoingOperationMap is a hashmap that allows functions to check whether the
+// operation they are about to perform is already in progress. If it is the
+// channel can be used to wait for the operation to finish. If it is not, the
+// function that wants to perform the operation should store its code in the
+// hashmap.
 // Note that any access to this map must be done while holding a lock.
-var locks = map[string]chan struct{}{}
+var ongoingOperationMap = map[string]chan struct{}{}
 
-// locksMutex is used to access locks safely.
-var locksMutex sync.Mutex
+// ongoingOperationMapLock is used to access ongoingOperationMap.
+var ongoingOperationMapLock sync.Mutex
+
+// Lock creates a lock for a specific storage volume to allow activities that
+// require exclusive access to take place. Will block until the lock is
+// established. On success, it returns an unlock function which needs to be
+// called to unlock the lock.
+func Lock(poolName string, volType string, volName string) func() {
+       lockID := fmt.Sprintf("%s/%s/%s", poolName, volType, volName)
 
-// Lock creates a lock for a specific storage volume to allow activities that require exclusive access to occur.
-// Will block until the lock is established. On success, it returns an unlock function which needs to be called to
-// unlock the lock.
-func Lock(lockName string) func() {
        for {
                // Get exclusive access to the map and see if there is already an operation ongoing.
-               locksMutex.Lock()
-               waitCh, ok := locks[lockName]
+               ongoingOperationMapLock.Lock()
+               waitCh, ok := ongoingOperationMap[lockID]
 
                if !ok {
                        // No ongoing operation, create a new channel to indicate our new operation.
                        waitCh = make(chan struct{})
-                       locks[lockName] = waitCh
-                       locksMutex.Unlock()
+                       ongoingOperationMap[lockID] = waitCh
+                       ongoingOperationMapLock.Unlock()
 
                        // Return a function that will complete the operation.
                        return func() {
                                // Get exclusive access to the map.
-                               locksMutex.Lock()
-                               doneCh, ok := locks[lockName]
+                               ongoingOperationMapLock.Lock()
+                               doneCh, ok := ongoingOperationMap[lockID]
 
                                // Load our existing operation.
                                if ok {
@@ -41,19 +47,19 @@ func Lock(lockName string) func() {
                                        close(doneCh)
 
                                        // Remove our existing operation entry from the map.
-                                       delete(locks, lockName)
+                                       delete(ongoingOperationMap, lockID)
                                }
 
                                // Release the lock now that the done channel is closed and the
                                // map entry has been deleted, this will allow any waiting users
                                // to try and get access to the map to create a new operation.
-                               locksMutex.Unlock()
+                               ongoingOperationMapLock.Unlock()
                        }
                }
 
                // An existing operation is ongoing, lets wait for that to finish and then try
                // to get exlusive access to create a new operation again.
-               locksMutex.Unlock()
+               ongoingOperationMapLock.Unlock()
                <-waitCh
        }
 }
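
For context (not part of the patches), here is a minimal usage sketch of the locking.Lock signature restored above, matching the call sites reinstated in PATCH 5/5 below; it assumes a tree with these reverts applied, where the package lives at lxd/storage/locking again, and doExclusiveWork plus the values passed to it are placeholders:

package main

import (
	"github.com/lxc/lxd/lxd/storage/locking"
)

// doExclusiveWork is a placeholder helper showing the lock/unlock pattern used
// by the restored call sites: Lock blocks until it owns the map entry keyed on
// pool, volume type and volume name, and returns the function that releases it.
func doExclusiveWork(poolName string, volType string, volName string) {
	unlock := locking.Lock(poolName, volType, volName)
	defer unlock()

	// ... perform the operation that needs exclusive access to the volume ...
}

func main() {
	// Placeholder values for illustration only.
	doExclusiveWork("pool1", "custom", "vol1")
}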

From 8d5c9f396814fce7067ab7f71caf97bd6fa0b52a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 6 Aug 2020 14:57:52 +0100
Subject: [PATCH 4/5] Revert "lxd/storage/drivers/utils: Adds OperationLockName
 function"

This reverts commit 0fa1c760732ac7d944132e9c404013423267ad3c.
---
 lxd/storage/drivers/utils.go | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index 055fd73e2d..d4539b693b 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -791,8 +791,3 @@ func PathNameDecode(text string) string {
        // converted back to "/" before making a final pass to convert "\0" back to original "-".
        return strings.Replace(strings.Replace(strings.Replace(text, "--", "\000", -1), "-", "/", -1), "\000", "-", -1)
 }
-
-// OperationLockName returns the storage specific lock name to use with locking package.
-func OperationLockName(poolName string, volType string, volName string) string {
-       return fmt.Sprintf("%s/%s/%s", poolName, volType, volName)
-}

From 881826c079119d6174456641e56ea7b3efd0f1cb Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Thu, 6 Aug 2020 14:58:06 +0100
Subject: [PATCH 5/5] Revert "lxd/storage: locking.Lock usage with
 OperationLockName wrapper"

This reverts commit 5bee3de5687db7ede611e99bf8683670f378b1ff.
---
 lxd/storage/backend_lxd.go              |  6 +++---
 lxd/storage/drivers/driver_lvm_utils.go |  4 ++--
 lxd/storage/drivers/volume.go           | 18 +++++++++---------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 6b09f11924..03bbfa1c92 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -16,13 +16,13 @@ import (
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/instance"
        "github.com/lxc/lxd/lxd/instance/instancetype"
-       "github.com/lxc/lxd/lxd/locking"
        "github.com/lxc/lxd/lxd/migration"
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/project"
        "github.com/lxc/lxd/lxd/revert"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/lxd/storage/drivers"
+       "github.com/lxc/lxd/lxd/storage/locking"
        "github.com/lxc/lxd/lxd/storage/memorypipe"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/api"
@@ -2028,7 +2028,7 @@ func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) e
        // We need to lock this operation to ensure that the image is not being created multiple times.
        // Uses a lock name of "EnsureImage_<fingerprint>" to avoid deadlocking with CreateVolume below that also
        // establishes a lock on the volume type & name if it needs to mount the volume before filling.
-       unlock := locking.Lock(drivers.OperationLockName(b.name, string(drivers.VolumeTypeImage), fmt.Sprintf("EnsureImage_%v", fingerprint)))
+       unlock := locking.Lock(b.name, string(drivers.VolumeTypeImage), fmt.Sprintf("EnsureImage_%v", fingerprint))
        defer unlock()
 
        // Load image info from database.
@@ -2124,7 +2124,7 @@ func (b *lxdBackend) DeleteImage(fingerprint string, op *operations.Operation) e
 
        // We need to lock this operation to ensure that the image is not being
        // deleted multiple times.
-       unlock := locking.Lock(drivers.OperationLockName(b.name, string(drivers.VolumeTypeImage), fingerprint))
+       unlock := locking.Lock(b.name, string(drivers.VolumeTypeImage), fingerprint)
        defer unlock()
 
        // Load image info from database.
diff --git a/lxd/storage/drivers/driver_lvm_utils.go b/lxd/storage/drivers/driver_lvm_utils.go
index ee81d9e021..e8ad0216cf 100644
--- a/lxd/storage/drivers/driver_lvm_utils.go
+++ b/lxd/storage/drivers/driver_lvm_utils.go
@@ -10,9 +10,9 @@ import (
 
        "github.com/pkg/errors"
 
-       "github.com/lxc/lxd/lxd/locking"
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/revert"
+       "github.com/lxc/lxd/lxd/storage/locking"
        "github.com/lxc/lxd/shared"
        log "github.com/lxc/lxd/shared/log15"
        "github.com/lxc/lxd/shared/units"
@@ -56,7 +56,7 @@ func (d *lvm) openLoopFile(source string) (*os.File, error) {
        }
 
        if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
-               unlock := locking.Lock(OperationLockName(d.name, "", ""))
+               unlock := locking.Lock(d.name, "", "")
                defer unlock()
 
                // Try to prepare new loop device.
diff --git a/lxd/storage/drivers/volume.go b/lxd/storage/drivers/volume.go
index d5678e0b2f..0c5cd7e88b 100644
--- a/lxd/storage/drivers/volume.go
+++ b/lxd/storage/drivers/volume.go
@@ -6,9 +6,9 @@ import (
 
        "github.com/pkg/errors"
 
-       "github.com/lxc/lxd/lxd/locking"
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/revert"
+       "github.com/lxc/lxd/lxd/storage/locking"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/units"
 )
@@ -187,7 +187,7 @@ func (v Volume) MountTask(task func(mountPath string, op *operations.Operation)
        // If the volume is a snapshot then call the snapshot specific mount/unmount functions as
        // these will mount the snapshot read only.
        if v.IsSnapshot() {
-               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourMount, err := v.driver.MountVolumeSnapshot(v, op)
                if err != nil {
@@ -199,13 +199,13 @@ func (v Volume) MountTask(task func(mountPath string, op *operations.Operation)
 
                if ourMount {
                        defer func() {
-                               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.UnmountVolumeSnapshot(v, op)
                                unlock()
                        }()
                }
        } else {
-               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourMount, err := v.driver.MountVolume(v, op)
                if err != nil {
@@ -217,7 +217,7 @@ func (v Volume) MountTask(task func(mountPath string, op *operations.Operation)
 
                if ourMount {
                        defer func() {
-                               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.UnmountVolume(v, op)
                                unlock()
                        }()
@@ -233,7 +233,7 @@ func (v Volume) UnmountTask(task func(op *operations.Operation) error, op *opera
        // If the volume is a snapshot then call the snapshot specific mount/unmount functions as
        // these will mount the snapshot read only.
        if v.IsSnapshot() {
-               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourUnmount, err := v.driver.UnmountVolumeSnapshot(v, op)
                if err != nil {
@@ -245,13 +245,13 @@ func (v Volume) UnmountTask(task func(op *operations.Operation) error, op *opera
 
                if ourUnmount {
                        defer func() {
-                               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.MountVolumeSnapshot(v, op)
                                unlock()
                        }()
                }
        } else {
-               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+               unlock := locking.Lock(v.pool, string(v.volType), v.name)
 
                ourUnmount, err := v.driver.UnmountVolume(v, op)
                if err != nil {
@@ -263,7 +263,7 @@ func (v Volume) UnmountTask(task func(op *operations.Operation) error, op *opera
 
                if ourUnmount {
                        defer func() {
-                               unlock := locking.Lock(OperationLockName(v.pool, string(v.volType), v.name))
+                               unlock := locking.Lock(v.pool, string(v.volType), v.name)
                                v.driver.MountVolume(v, op)
                                unlock()
                        }()