The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/8163

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===

From b62438c41c6b51c9776b8af468d84fbb51ccd9fa Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Nov 2020 11:26:09 +0000
Subject: [PATCH 1/5] lxd/images: Add word "Image" to log line to give context

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/images.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/images.go b/lxd/images.go
index 4fb55b9087..123ac310d6 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -1241,7 +1241,7 @@ func autoUpdateImage(d *Daemon, op *operations.Operation, id int, info *api.Imag
 
                hash = newInfo.Fingerprint
                if hash == fingerprint {
-                       logger.Debug("Already up to date", log.Ctx{"fp": fingerprint})
+                       logger.Debug("Image already up to date", log.Ctx{"fp": fingerprint})
                        continue
                }
 

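For readers less familiar with the logging style patch 1 moves toward, here is a small
standalone Go sketch (deliberately not using LXD's shared logger or log15 types) of why
the subject word goes into the constant message while the variable data stays in a
key/value context:

package main

import "fmt"

// Ctx mimics the role of log.Ctx in the patch: structured key/value fields
// that travel alongside a constant, grep-friendly message.
type Ctx map[string]interface{}

// debug prints a fixed message followed by its structured context.
func debug(msg string, ctx Ctx) {
        fmt.Printf("DEBUG %s %v\n", msg, ctx)
}

func main() {
        fingerprint := "a1b2c3d4"

        // Before: no subject, so "Already up to date" is ambiguous in mixed logs.
        debug("Already up to date", Ctx{"fp": fingerprint})

        // After: the subject ("Image") is part of the constant message text.
        debug("Image already up to date", Ctx{"fp": fingerprint})
}

With this shape, grepping for "Image already up to date" finds every occurrence
regardless of the fingerprint value.
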
From 3833ce4d916d0d603713dc1f5ecb6b1a9611e35a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Nov 2020 11:27:07 +0000
Subject: [PATCH 2/5] lxd/daemon/images: Add contextual logging and use "fp"
 rather than "image" for consistency with other code areas

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/daemon_images.go | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go
index 3a5b244d7e..a491ae4f4e 100644
--- a/lxd/daemon_images.go
+++ b/lxd/daemon_images.go
@@ -173,41 +173,45 @@ func (d *Daemon) ImageDownload(op *operations.Operation, server string, protocol
        }
 
        if imgInfo != nil {
-               logger.Debugf("Image %q already exists in the DB", fp)
                info = imgInfo
+               ctxMap = log.Ctx{"fp": info.Fingerprint}
+               logger.Debug("Image already exists in the DB", ctxMap)
 
                // If not requested in a particular pool, we're done.
                if storagePool == "" {
                        return info, nil
                }
 
-               // Get the ID of the target storage pool
+               ctxMap["pool"] = storagePool
+
+               // Get the ID of the target storage pool.
                poolID, err := d.cluster.GetStoragePoolID(storagePool)
                if err != nil {
                        return nil, err
                }
 
-               // Check if the image is already in the pool
+               // Check if the image is already in the pool.
                poolIDs, err := d.cluster.GetPoolsWithImage(info.Fingerprint)
                if err != nil {
                        return nil, err
                }
 
                if shared.Int64InSlice(poolID, poolIDs) {
-                       logger.Debugf("Image already exists on storage pool %q", storagePool)
+                       logger.Debug("Image already exists on storage pool", ctxMap)
                        return info, nil
                }
 
-               // Import the image in the pool
-               logger.Debugf("Image does not exist on storage pool %q", storagePool)
+               // Import the image in the pool.
+               logger.Debug("Image does not exist on storage pool", ctxMap)
 
                err = imageCreateInPool(d, info, storagePool)
                if err != nil {
-                       logger.Debugf("Failed to create image on storage pool %q: %v", storagePool, err)
-                       return nil, err
+                       ctxMap["err"] = err
+                       logger.Debug("Failed to create image on storage pool", ctxMap)
+                       return nil, errors.Wrapf(err, "Failed to create image %q on storage pool %q", info.Fingerprint, storagePool)
                }
 
-               logger.Debugf("Created image on storage pool %q", storagePool)
+               logger.Debug("Created image on storage pool", ctxMap)
                return info, nil
        }
 
@@ -217,9 +221,7 @@ func (d *Daemon) ImageDownload(op *operations.Operation, server string, protocol
                // We are already downloading the image
                imagesDownloadingLock.Unlock()
 
-               logger.Debug(
-                       "Already downloading the image, waiting for it to succeed",
-                       log.Ctx{"image": fp})
+               logger.Debug("Already downloading the image, waiting for it to succeed", log.Ctx{"fp": fp})
 
                // Wait until the download finishes (channel closes)
                <-waitChannel
@@ -228,7 +230,7 @@ func (d *Daemon) ImageDownload(op *operations.Operation, server string, protocol
                _, imgInfo, err := d.cluster.GetImage(project, fp, false)
                if err != nil {
                        // Other download failed, lets try again
-                       logger.Error("Other image download didn't succeed", log.Ctx{"image": fp})
+                       logger.Error("Other image download didn't succeed", log.Ctx{"fp": fp})
                } else {
                        // Other download succeeded, we're done
                        return imgInfo, nil
@@ -256,7 +258,7 @@ func (d *Daemon) ImageDownload(op *operations.Operation, server string, protocol
        if op == nil {
                ctxMap = log.Ctx{"alias": alias, "server": server}
        } else {
-               ctxMap = log.Ctx{"trigger": op.URL(), "image": fp, "operation": op.ID(), "alias": alias, "server": server}
+               ctxMap = log.Ctx{"trigger": op.URL(), "fp": fp, "operation": op.ID(), "alias": alias, "server": server}
        }
        logger.Info("Downloading image", ctxMap)
 

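As a companion to patch 2, this standalone sketch shows the pattern the diff applies
inside ImageDownload: build one context map, extend it as more becomes known ("pool",
"err"), and carry the same detail into the wrapped error. fmt.Errorf with %w stands in
for the errors.Wrapf call used in the real code, and the helper names below are
illustrative only:

package main

import (
        "errors"
        "fmt"
)

type ctx map[string]interface{}

func debug(msg string, c ctx) { fmt.Printf("DEBUG %s %v\n", msg, c) }

// errPool simulates a storage failure; createInPool is a stand-in for imageCreateInPool.
var errPool = errors.New("no space left on pool")

func createInPool(fingerprint string, pool string) error { return errPool }

// ensureImageInPool mirrors the "image already in the DB, ensure it is in the pool" branch.
func ensureImageInPool(fingerprint string, pool string) error {
        ctxMap := ctx{"fp": fingerprint}
        debug("Image already exists in the DB", ctxMap)

        ctxMap["pool"] = pool
        if err := createInPool(fingerprint, pool); err != nil {
                ctxMap["err"] = err
                debug("Failed to create image on storage pool", ctxMap)
                return fmt.Errorf("Failed to create image %q on storage pool %q: %w", fingerprint, pool, err)
        }

        debug("Created image on storage pool", ctxMap)
        return nil
}

func main() {
        if err := ensureImageInPool("a1b2c3d4", "default"); err != nil {
                fmt.Println(err)
        }
}
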
From d5dcf5465221a35baa96e7e4d20d2969b26a1490 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Nov 2020 11:28:14 +0000
Subject: [PATCH 3/5] lxd/profiles/utils: Remove container references, improve
 comments

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/profiles_utils.go | 96 +++++++++++++++++++++----------------------
 1 file changed, 48 insertions(+), 48 deletions(-)

diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index 2a6aa30700..a601e140f1 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -7,7 +7,7 @@ import (
        deviceConfig "github.com/lxc/lxd/lxd/device/config"
        "github.com/lxc/lxd/lxd/instance"
        "github.com/lxc/lxd/lxd/instance/instancetype"
-       projecthelpers "github.com/lxc/lxd/lxd/project"
+       "github.com/lxc/lxd/lxd/project"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/api"
        "github.com/pkg/errors"
@@ -16,66 +16,69 @@ import (
 func doProfileUpdate(d *Daemon, projectName string, name string, id int64, profile *api.Profile, req api.ProfilePut) error {
        // Check project limits.
        err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
-               return projecthelpers.AllowProfileUpdate(tx, projectName, name, req)
+               return project.AllowProfileUpdate(tx, projectName, name, req)
        })
        if err != nil {
                return err
        }
 
-       // Sanity checks
+       // Sanity checks.
        err = instance.ValidConfig(d.os, req.Config, true, false)
        if err != nil {
                return err
        }
 
-       // At this point we don't know the instance type, so just use instancetype.Any type for validation.
+       // Profiles can be applied to any instance type, so just use instancetype.Any type for validation so that
+       // instance type specific validation checks are not performed.
        err = instance.ValidDevices(d.State(), d.cluster, projectName, instancetype.Any, deviceConfig.NewDevices(req.Devices), false)
        if err != nil {
                return err
        }
 
-       containers, err := getProfileContainersInfo(d.cluster, projectName, name)
+       insts, err := getProfileInstancesInfo(d.cluster, projectName, name)
        if err != nil {
-               return errors.Wrapf(err, "failed to query instances associated with profile '%s'", name)
+               return errors.Wrapf(err, "Failed to query instances associated with profile %q", name)
        }
 
-       // Check if the root device is supposed to be changed or removed.
+       // Check if the root disk device's pool is supposed to be changed or removed and prevent that if there are
+       // instances using that root disk device.
        oldProfileRootDiskDeviceKey, oldProfileRootDiskDevice, _ := shared.GetRootDiskDevice(profile.Devices)
        _, newProfileRootDiskDevice, _ := shared.GetRootDiskDevice(req.Devices)
-       if len(containers) > 0 && oldProfileRootDiskDevice["pool"] != "" && newProfileRootDiskDevice["pool"] == "" || (oldProfileRootDiskDevice["pool"] != newProfileRootDiskDevice["pool"]) {
-               // Check for containers using the device
-               for _, container := range containers {
-                       // Check if the device is locally overridden
-                       k, v, _ := shared.GetRootDiskDevice(container.Devices.CloneNative())
+       if len(insts) > 0 && oldProfileRootDiskDevice["pool"] != "" && newProfileRootDiskDevice["pool"] == "" || (oldProfileRootDiskDevice["pool"] != newProfileRootDiskDevice["pool"]) {
+               // Check for instances using the device.
+               for _, inst := range insts {
+                       // Check if the device is locally overridden.
+                       k, v, _ := shared.GetRootDiskDevice(inst.Devices.CloneNative())
                        if k != "" && v["pool"] != "" {
                                continue
                        }
 
-                       // Check what profile the device comes from
-                       profiles := container.Profiles
+                       // Check what profile the device comes from by working backwards along the profiles list.
+                       profiles := inst.Profiles
                        for i := len(profiles) - 1; i >= 0; i-- {
-                               _, profile, err := d.cluster.GetProfile(projecthelpers.Default, profiles[i])
+                               // tomp TODO project.Default here look suspicious.
+                               _, profile, err := d.cluster.GetProfile(project.Default, profiles[i])
                                if err != nil {
                                        return err
                                }
 
-                               // Check if we find a match for the device
+                               // Check if we find a match for the device.
                                _, ok := profile.Devices[oldProfileRootDiskDeviceKey]
                                if ok {
-                                       // Found the profile
+                                       // Found the profile.
                                        if profiles[i] == name {
-                                               // If it's the current profile, then we can't modify that root device
+                                               // If it's the current profile, then we can't modify that root device.
                                                return fmt.Errorf("At least one instance relies on this profile's root disk device")
-                                       } else {
-                                               // If it's not, then move on to the next container
-                                               break
                                        }
+
+                                       // If it's not, then move on to the next instance.
+                                       break
                                }
                        }
                }
        }
 
-       // Update the database
+       // Update the database.
        err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
                return tx.UpdateProfile(projectName, name, db.Profile{
                        Project:     projectName,
@@ -89,8 +92,7 @@ func doProfileUpdate(d *Daemon, projectName string, name string, id int64, profi
                return err
        }
 
-       // Update all the containers on this node using the profile. Must be
-       // done after db.TxCommit due to DB lock.
+       // Update all the instances on this node using the profile. Must be done after db.TxCommit due to DB lock.
        nodeName := ""
        err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
                var err error
@@ -101,15 +103,15 @@ func doProfileUpdate(d *Daemon, projectName string, name string, id int64, profi
                return errors.Wrap(err, "failed to query local node name")
        }
        failures := map[string]error{}
-       for _, args := range containers {
-               err := doProfileUpdateContainer(d, name, profile.ProfilePut, nodeName, args)
+       for _, args := range insts {
+               err := doProfileUpdateInstance(d, name, profile.ProfilePut, nodeName, args)
                if err != nil {
                        failures[args.Name] = err
                }
        }
 
        if len(failures) != 0 {
-               msg := "The following containers failed to update (profile change still saved):\n"
+               msg := "The following instances failed to update (profile change still saved):\n"
                for cname, err := range failures {
                        msg += fmt.Sprintf(" - %s: %s\n", cname, err)
                }
@@ -132,21 +134,21 @@ func doProfileUpdateCluster(d *Daemon, project, name string, old api.ProfilePut)
                return errors.Wrap(err, "Failed to query local node name")
        }
 
-       containers, err := getProfileContainersInfo(d.cluster, project, name)
+       insts, err := getProfileInstancesInfo(d.cluster, project, name)
        if err != nil {
-               return errors.Wrapf(err, "Failed to query instances associated with profile '%s'", name)
+               return errors.Wrapf(err, "Failed to query instances associated with profile %q", name)
        }
 
        failures := map[string]error{}
-       for _, args := range containers {
-               err := doProfileUpdateContainer(d, name, old, nodeName, args)
+       for _, args := range insts {
+               err := doProfileUpdateInstance(d, name, old, nodeName, args)
                if err != nil {
                        failures[args.Name] = err
                }
        }
 
        if len(failures) != 0 {
-               msg := "The following containers failed to update (profile change still saved):\n"
+               msg := "The following instances failed to update (profile change still saved):\n"
                for cname, err := range failures {
                        msg += fmt.Sprintf(" - %s: %s\n", cname, err)
                }
@@ -156,10 +158,10 @@ func doProfileUpdateCluster(d *Daemon, project, name string, old api.ProfilePut)
        return nil
 }
 
-// Profile update of a single container.
-func doProfileUpdateContainer(d *Daemon, name string, old api.ProfilePut, nodeName string, args db.InstanceArgs) error {
+// Profile update of a single instance.
+func doProfileUpdateInstance(d *Daemon, name string, old api.ProfilePut, nodeName string, args db.InstanceArgs) error {
        if args.Node != "" && args.Node != nodeName {
-               // No-op, this container does not belong to this node.
+               // No-op, this instance does not belong to this node.
                return nil
        }
 
@@ -197,26 +199,24 @@ func doProfileUpdateContainer(d *Daemon, name string, old api.ProfilePut, nodeNa
        }, true)
 }
 
-// Query the db for information about containers associated with the given
-// profile.
-func getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]db.InstanceArgs, error) {
-       // Query the db for information about containers associated with the
-       // given profile.
-       names, err := cluster.GetInstancesWithProfile(project, profile)
+// Query the db for information about instances associated with the given profile.
+func getProfileInstancesInfo(cluster *db.Cluster, projectName string, profileName string) ([]db.InstanceArgs, error) {
+       // Query the db for information about instances associated with the given profile.
+       projectInstNames, err := cluster.GetInstancesWithProfile(projectName, profileName)
        if err != nil {
-               return nil, errors.Wrapf(err, "Failed to query instances with profile '%s'", profile)
+               return nil, errors.Wrapf(err, "Failed to query instances with profile %q", profileName)
        }
 
-       containers := []db.InstanceArgs{}
+       instances := []db.InstanceArgs{}
        err = cluster.Transaction(func(tx *db.ClusterTx) error {
-               for ctProject, ctNames := range names {
-                       for _, ctName := range ctNames {
-                               container, err := tx.GetInstance(ctProject, ctName)
+               for instProject, instNames := range projectInstNames {
+                       for _, instName := range instNames {
+                               inst, err := tx.GetInstance(instProject, instName)
                                if err != nil {
                                        return err
                                }
 
-                               containers = append(containers, db.InstanceToArgs(container))
+                               instances = append(instances, db.InstanceToArgs(inst))
                        }
                }
 
@@ -226,5 +226,5 @@ func getProfileContainersInfo(cluster *db.Cluster, project, profile string) ([]d
                return nil, errors.Wrapf(err, "Failed to fetch instances")
        }
 
-       return containers, nil
+       return instances, nil
 }
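
To make the "working backwards along the profiles list" comment in patch 3 concrete,
here is a simplified, self-contained sketch of the ownership check; the types and data
below are illustrative stand-ins, not db.InstanceArgs or the cluster database:

package main

import "fmt"

// Devices maps device name -> device config, matching the shape used by profiles.
type Devices map[string]map[string]string

// rootDiskProfile returns the last profile in an instance's ordered profile list that
// defines the given root disk device name. Later profiles override earlier ones, so
// walking backwards finds the profile whose root disk the instance actually uses.
func rootDiskProfile(profiles []string, profileDevices map[string]Devices, rootKey string) (string, bool) {
        for i := len(profiles) - 1; i >= 0; i-- {
                if _, ok := profileDevices[profiles[i]][rootKey]; ok {
                        return profiles[i], true
                }
        }
        return "", false
}

func main() {
        profileDevices := map[string]Devices{
                "default": {"root": {"type": "disk", "path": "/", "pool": "default"}},
                "fastssd": {"root": {"type": "disk", "path": "/", "pool": "ssd"}},
        }

        // The instance applies "default" first, then "fastssd"; the later profile wins,
        // so only a pool change on "fastssd" would need to be blocked for this instance.
        owner, ok := rootDiskProfile([]string{"default", "fastssd"}, profileDevices, "root")
        fmt.Println(owner, ok) // fastssd true
}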

From 54ff08a9ec1941a2df0cad5df93b039bd7a2f4a0 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Nov 2020 11:28:52 +0000
Subject: [PATCH 4/5] lxd/db/profiles: Updates GetInstancesWithProfile to
 return all instance types, not just containers

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/db/profiles.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/lxd/db/profiles.go b/lxd/db/profiles.go
index f04eeabe26..809451beb1 100644
--- a/lxd/db/profiles.go
+++ b/lxd/db/profiles.go
@@ -216,8 +216,7 @@ func (c *Cluster) GetInstancesWithProfile(project, profile string) (map[string][
                WHERE instances_profiles.profile_id ==
                  (SELECT profiles.id FROM profiles
                   JOIN projects ON projects.id == profiles.project_id
-                  WHERE profiles.name=? AND projects.name=?)
-               AND instances.type == 0`
+                  WHERE profiles.name=? AND projects.name=?)`
 
        results := map[string][]string{}
        inargs := []interface{}{profile, project}
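
What patch 4 changes in practice: in LXD's instances table a type of 0 is a container
and 1 is a virtual machine, so dropping "AND instances.type == 0" makes the query return
instances of every type that use the profile. The self-contained sketch below mirrors
that semantics in memory (the real code of course does it in SQL); the struct and helper
names are illustrative only:

package main

import "fmt"

type instance struct {
        Project  string
        Name     string
        Type     int // 0 = container, 1 = virtual machine
        Profiles []string
}

func usesProfile(inst instance, profile string) bool {
        for _, p := range inst.Profiles {
                if p == profile {
                        return true
                }
        }
        return false
}

// instancesWithProfile groups matching instance names by project with no type filter,
// mirroring the revised GetInstancesWithProfile behaviour.
func instancesWithProfile(insts []instance, profile string) map[string][]string {
        results := map[string][]string{}
        for _, inst := range insts {
                if usesProfile(inst, profile) { // previously this also required inst.Type == 0
                        results[inst.Project] = append(results[inst.Project], inst.Name)
                }
        }
        return results
}

func main() {
        insts := []instance{
                {Project: "default", Name: "c1", Type: 0, Profiles: []string{"default"}},
                {Project: "default", Name: "vm1", Type: 1, Profiles: []string{"default"}},
        }
        fmt.Println(instancesWithProfile(insts, "default")) // map[default:[c1 vm1]]
}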

From e45002d7b1cb87970d1eb7388cb181a69db36b06 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Tue, 17 Nov 2020 11:29:19 +0000
Subject: [PATCH 5/5] shared/instance: Improves comments

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 shared/instance.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/shared/instance.go b/shared/instance.go
index 023573a0cf..c79f618753 100644
--- a/shared/instance.go
+++ b/shared/instance.go
@@ -25,7 +25,7 @@ const (
 )
 
 // IsRootDiskDevice returns true if the given device representation is configured as root disk for
-// a container. It typically get passed a specific entry of api.Instance.Devices.
+// an instance. It typically gets passed a specific entry of api.Instance.Devices.
 func IsRootDiskDevice(device map[string]string) bool {
        // Root disk devices also need a non-empty "pool" property, but we can't check that here
        // because this function is used with clients talking to older servers where there was no
@@ -38,7 +38,8 @@ func IsRootDiskDevice(device map[string]string) bool {
        return false
 }
 
-// GetRootDiskDevice returns the container device that is configured as root disk
+// GetRootDiskDevice returns the instance device that is configured as root disk.
+// Returns the device name and device config map.
 func GetRootDiskDevice(devices map[string]map[string]string) (string, map[string]string, error) {
        var devName string
        var dev map[string]string
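
Patch 5 documents that GetRootDiskDevice returns the device name and device config map;
a short usage sketch against that documented signature follows. The exact error
conditions (no root disk device found, or more than one) are an assumption based on how
the callers in patch 3 discard or propagate the error:

package main

import (
        "fmt"

        "github.com/lxc/lxd/shared"
)

func main() {
        devices := map[string]map[string]string{
                "eth0": {"type": "nic", "nictype": "bridged", "parent": "lxdbr0"},
                "root": {"type": "disk", "path": "/", "pool": "default"},
        }

        // Returns the device name ("root") and its config map.
        name, dev, err := shared.GetRootDiskDevice(devices)
        if err != nil {
                fmt.Println("no usable root disk device:", err)
                return
        }

        // IsRootDiskDevice reports whether a single device entry is a root disk.
        fmt.Println(name, dev["pool"], shared.IsRootDiskDevice(dev))
}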