The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/8125

This e-mail was sent by the LXC bot, direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Unifies the old and new schemas so the same function can be used on both the 4.0 and current branches.
From 1f3a943afcfb2d0d69a0782c6bbaaad62a2466a6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 6 Nov 2020 11:05:45 +0000
Subject: [PATCH 1/4] lxd/cluster/connect: Updates ConnectIfVolumeIsRemote to
 use tx.GetStorageVolumeNodes

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/cluster/connect.go | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/lxd/cluster/connect.go b/lxd/cluster/connect.go
index 335341b022..825d3aee23 100644
--- a/lxd/cluster/connect.go
+++ b/lxd/cluster/connect.go
@@ -125,20 +125,22 @@ func ConnectIfVolumeIsRemote(s *state.State, poolName 
string, projectName string
                return nil, err
        }
 
-       var addresses []string
+       localNodeID := s.Cluster.GetNodeID()
+       var nodes []db.NodeInfo
        err = s.Cluster.Transaction(func(tx *db.ClusterTx) error {
                poolID, err := tx.GetStoragePoolID(poolName)
                if err != nil {
                        return err
                }
 
-               addresses, err = tx.GetStorageVolumeNodeAddresses(poolID, 
projectName, volumeName, volumeType)
+               nodes, err = tx.GetStorageVolumeNodes(poolID, projectName, 
volumeName, volumeType)
                if err != nil {
                        return err
                }
 
                return nil
        })
+
        if err != nil && err != db.ErrNoClusterMember {
                return nil, err
        }
@@ -162,29 +164,30 @@ func ConnectIfVolumeIsRemote(s *state.State, poolName 
string, projectName string
                                return nil, errors.Wrapf(err, "Failed getting 
cluster member info for %q", remoteInstance.Node)
                        }
 
-                       // Replace address list with instance's cluster member.
-                       addresses = []string{instNode.Address}
+                       // Replace node list with instance's cluster member 
node (which might be local member).
+                       nodes = []db.NodeInfo{instNode}
                } else {
-                       // Volume isn't exclusively attached to an instance and 
has no fixed node.
-                       addresses = []string{""} // Use local cluster member.
+                       // Volume isn't exclusively attached to an instance. 
Use local cluster member.
+                       return nil, nil
                }
        }
 
-       addressCount := len(addresses)
-       if addressCount > 1 {
-               return nil, fmt.Errorf("More than one cluster member has a 
volume named %q", volumeName)
-       } else if addressCount < 1 {
+       nodeCount := len(nodes)
+       if nodeCount > 1 {
+               return nil, fmt.Errorf("More than one cluster member has a 
volume named %q. Use --target flag to specify member", volumeName)
+       } else if nodeCount < 1 {
+               // Should never get here.
                return nil, fmt.Errorf("Volume %q has empty cluster member 
list", volumeName)
        }
 
-       address := addresses[0]
-       // Use local cluster member.
-       if address == "" {
+       node := nodes[0]
+       if node.ID == localNodeID {
+               // Use local cluster member if volume belongs to this local 
node.
                return nil, nil
        }
 
        // Connect to remote cluster member.
-       return Connect(address, cert, false)
+       return Connect(node.Address, cert, false)
 }
 
 // SetupTrust is a convenience around InstanceServer.CreateCertificate that

From 450e05daa8661187db89bd1dc78831e75d8a8ffa Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 30 Oct 2020 13:50:50 +0000
Subject: [PATCH 2/4] lxd/db/storage/volumes: Adds workaround for old remote
 volume schema in GetStorageVolumeNodeAddresses

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/db/storage_volumes.go | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index 738c51849c..a028910c37 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -677,8 +677,23 @@ func (c *ClusterTx) GetStorageVolumeNodeAddresses(poolID 
int64, projectName stri
 
        sort.Strings(addresses)
 
-       if len(addresses) == 0 {
+       addressCount := len(addresses)
+       if addressCount == 0 {
                return nil, ErrNoSuchObject
+       } else if addressCount > 1 {
+               driver, err := c.GetStoragePoolDriver(poolID)
+               if err != nil {
+                       return nil, err
+               }
+
+               // Earlier schema versions created a volume DB record for each 
cluster member for remote storage
+               // pools, so if the storage driver is one of those remote pools 
and the addressCount is >1 then we
+               // take this to mean that the volume doesn't have an explicit 
cluster member and is therefore
+               // equivalent to db.ErrNoClusterMember that is used in newer 
schemas where a single remote volume
+               // DB record is created that is not associated to any single 
member.
+               if driver == "ceph" || driver == "cephfs" {
+                       return nil, ErrNoClusterMember
+               }
        }
 
        return addresses, nil

From 9d20ce2cb375ace5dd0b60be7b0f7d686d7762ab Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 6 Nov 2020 11:12:19 +0000
Subject: [PATCH 3/4] lxd/db/storage/volumes: Renames
 GetStorageVolumeNodeAddresses to GetStorageVolumeNodes

And updates to return a list of db.NodeInfo structs rather than just addresses. 
This is more useful and removes the peculiarities around returning an empty 
node address for local nodes.

This makes it dependent on the caller to identify a local node.

Also unifies the function to handle both current and legacy volume records for 
remote drivers.

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/db/storage_volumes.go | 58 +++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 33 deletions(-)

diff --git a/lxd/db/storage_volumes.go b/lxd/db/storage_volumes.go
index a028910c37..261784ce1f 100644
--- a/lxd/db/storage_volumes.go
+++ b/lxd/db/storage_volumes.go
@@ -5,7 +5,6 @@ package db
 import (
        "database/sql"
        "fmt"
-       "sort"
        "strings"
        "time"
 
@@ -622,27 +621,20 @@ type StorageVolumeArgs struct {
        ContentType string
 }
 
-// GetStorageVolumeNodeAddresses returns the addresses of all nodes on which 
the
-// volume with the given name if defined.
-//
+// GetStorageVolumeNodes returns the node info of all nodes on which the 
volume with the given name is defined.
 // The volume name can be either a regular name or a volume snapshot name.
-//
-// The empty string is used in place of the address of the current node.
-func (c *ClusterTx) GetStorageVolumeNodeAddresses(poolID int64, projectName 
string, volumeName string, volumeType int) ([]string, error) {
-       nodes := []struct {
-               id      int64
-               address string
-       }{}
-       dest := func(i int) []interface{} {
-               nodes = append(nodes, struct {
-                       id      int64
-                       address string
-               }{})
-               return []interface{}{&nodes[i].id, &nodes[i].address}
+// If the volume is defined, but without a specific node, then the 
ErrNoClusterMember error is returned.
+// If the volume is not found then the ErrNoSuchObject error is returned.
+func (c *ClusterTx) GetStorageVolumeNodes(poolID int64, projectName string, 
volumeName string, volumeType int) ([]NodeInfo, error) {
+       nodes := []NodeInfo{}
 
+       dest := func(i int) []interface{} {
+               nodes = append(nodes, NodeInfo{})
+               return []interface{}{&nodes[i].ID, &nodes[i].Address, 
&nodes[i].Name}
        }
+
        sql := `
-       SELECT coalesce(nodes.id,0) AS nodeID, coalesce(nodes.address,"") AS 
nodeAddress
+       SELECT coalesce(nodes.id,0) AS nodeID, coalesce(nodes.address,"") AS 
nodeAddress, coalesce(nodes.name,"") AS nodeName
        FROM storage_volumes_all
        JOIN projects ON projects.id = storage_volumes_all.project_id
        LEFT JOIN nodes ON storage_volumes_all.node_id=nodes.id
@@ -661,26 +653,21 @@ func (c *ClusterTx) GetStorageVolumeNodeAddresses(poolID 
int64, projectName stri
                return nil, err
        }
 
-       addresses := []string{}
+       if len(nodes) == 0 {
+               return nil, ErrNoSuchObject
+       }
+
        for _, node := range nodes {
                // Volume is defined without a cluster member.
-               if node.id == 0 && node.address == "" {
+               if node.ID == 0 {
                        return nil, ErrNoClusterMember
                }
-
-               address := node.address
-               if node.id == c.nodeID {
-                       address = ""
-               }
-               addresses = append(addresses, address)
        }
 
-       sort.Strings(addresses)
-
-       addressCount := len(addresses)
-       if addressCount == 0 {
+       nodeCount := len(nodes)
+       if nodeCount == 0 {
                return nil, ErrNoSuchObject
-       } else if addressCount > 1 {
+       } else if nodeCount > 1 {
                driver, err := c.GetStoragePoolDriver(poolID)
                if err != nil {
                        return nil, err
@@ -691,12 +678,17 @@ func (c *ClusterTx) GetStorageVolumeNodeAddresses(poolID 
int64, projectName stri
                // take this to mean that the volume doesn't have an explicit 
cluster member and is therefore
                // equivalent to db.ErrNoClusterMember that is used in newer 
schemas where a single remote volume
                // DB record is created that is not associated to any single 
member.
-               if driver == "ceph" || driver == "cephfs" {
+               if StorageRemoteDriverNames == nil {
+                       return nil, fmt.Errorf("No remote storage drivers 
function defined")
+               }
+
+               remoteDrivers := StorageRemoteDriverNames()
+               if shared.StringInSlice(driver, remoteDrivers) {
                        return nil, ErrNoClusterMember
                }
        }
 
-       return addresses, nil
+       return nodes, nil
 }
 
 // Return the name of the node a storage volume is on.

From 4433baa52d7e961c3c7e161d810d022fd6722d78 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Fri, 6 Nov 2020 11:31:07 +0000
Subject: [PATCH 4/4] lxd/db/storage/volumes/test: Updates test for
 TestGetStorageVolumeNodes

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/db/storage_volumes_test.go | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/lxd/db/storage_volumes_test.go b/lxd/db/storage_volumes_test.go
index 3e13fc5f68..897677cc80 100644
--- a/lxd/db/storage_volumes_test.go
+++ b/lxd/db/storage_volumes_test.go
@@ -11,7 +11,11 @@ import (
 )
 
 // Addresses of all nodes with matching volume name are returned.
-func TestStorageVolumeNodeAddresses(t *testing.T) {
+func TestGetStorageVolumeNodes(t *testing.T) {
+       db.StorageRemoteDriverNames = func() []string {
+               return []string{"ceph", "cephfs"}
+       }
+
        tx, cleanup := db.NewTestClusterTx(t)
        defer cleanup()
 
@@ -29,10 +33,21 @@ func TestStorageVolumeNodeAddresses(t *testing.T) {
        addVolume(t, tx, poolID, nodeID3, "volume2")
        addVolume(t, tx, poolID, nodeID2, "volume2")
 
-       addresses, err := tx.GetStorageVolumeNodeAddresses(poolID, "default", 
"volume1", 1)
+       nodes, err := tx.GetStorageVolumeNodes(poolID, "default", "volume1", 1)
        require.NoError(t, err)
 
-       assert.Equal(t, []string{"", "1.2.3.4:666"}, addresses)
+       assert.Equal(t, []db.NodeInfo{
+               db.NodeInfo{
+                       ID:      nodeID1,
+                       Name:    "none",
+                       Address: "0.0.0.0",
+               },
+               db.NodeInfo{
+                       ID:      nodeID2,
+                       Name:    "node2",
+                       Address: "1.2.3.4:666",
+               },
+       }, nodes)
 }
 
 func addPool(t *testing.T, tx *db.ClusterTx, name string) int64 {
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel

Reply via email to