The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6527

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===

From a92f37d0e67c816a3b684169fa1eba5558b08ad7 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Fri, 29 Nov 2019 00:01:26 -0500
Subject: [PATCH 01/11] lxd: Pass instance type to instanceLoadNodeAll
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/container.go          | 15 +++++++--------
 lxd/containers.go         |  5 +++--
 lxd/daemon.go             |  3 ++-
 lxd/db/containers.go      | 14 --------------
 lxd/db/containers_test.go |  4 ++--
 lxd/devices.go            |  7 ++++---
 lxd/devlxd.go             |  2 +-
 lxd/networks_utils.go     |  2 +-
 8 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 5cb34fc6c8..e72671d04c 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -41,10 +41,9 @@ import (
 )
 
 func init() {
-       // Expose instanceLoadNodeAll to the device package converting the response to a slice of Instances.
        // This is because container types are defined in the main package and are not importable.
        device.InstanceLoadNodeAll = func(s *state.State) ([]device.Instance, error) {
-               containers, err := instanceLoadNodeAll(s)
+               containers, err := instanceLoadNodeAll(s, instancetype.Any)
                if err != nil {
                        return nil, err
                }
@@ -1230,12 +1229,12 @@ func instanceLoadAll(s *state.State) ([]instance.Instance, error) {
 }
 
 // Load all instances of this nodes.
-func instanceLoadNodeAll(s *state.State) ([]instance.Instance, error) {
+func instanceLoadNodeAll(s *state.State, instanceType instancetype.Type) ([]instance.Instance, error) {
        // Get all the container arguments
-       var cts []db.Instance
+       var insts []db.Instance
        err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
                var err error
-               cts, err = tx.ContainerNodeList()
+               insts, err = tx.ContainerNodeProjectList("", instanceType)
                if err != nil {
                        return err
                }
@@ -1246,7 +1245,7 @@ func instanceLoadNodeAll(s *state.State) ([]instance.Instance, error) {
                return nil, err
        }
 
-       return instanceLoadAllInternal(cts, s)
+       return instanceLoadAllInternal(insts, s)
 }
 
 // Load all instances of this nodes under the given project.
@@ -1342,7 +1341,7 @@ func instanceLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile)
 func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
        f := func(ctx context.Context) {
                // Load all local instances
-               allContainers, err := instanceLoadNodeAll(d.State())
+               allContainers, err := instanceLoadNodeAll(d.State(), instancetype.Any)
                if err != nil {
                        logger.Error("Failed to load containers for scheduled 
snapshots", log.Ctx{"err": err})
                        return
@@ -1485,7 +1484,7 @@ func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, instances []in
 func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
        f := func(ctx context.Context) {
                // Load all local instances
-               allInstances, err := instanceLoadNodeAll(d.State())
+               allInstances, err := instanceLoadNodeAll(d.State(), instancetype.Any)
                if err != nil {
                        logger.Error("Failed to load instances for snapshot 
expiry", log.Ctx{"err": err})
                        return
diff --git a/lxd/containers.go b/lxd/containers.go
index a3a0b1f86f..0b5b90b627 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -10,6 +10,7 @@ import (
 
        "github.com/lxc/lxd/lxd/db"
        "github.com/lxc/lxd/lxd/instance"
+       "github.com/lxc/lxd/lxd/instance/instancetype"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/shared"
        "github.com/lxc/lxd/shared/logger"
@@ -206,7 +207,7 @@ func (slice containerAutostartList) Swap(i, j int) {
 
 func containersRestart(s *state.State) error {
        // Get all the instances
-       result, err := instanceLoadNodeAll(s)
+       result, err := instanceLoadNodeAll(s, instancetype.Any)
        if err != nil {
                return err
        }
@@ -304,7 +305,7 @@ func containersShutdown(s *state.State) error {
        dbAvailable := true
 
        // Get all the instances
-       instances, err := instanceLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s, instancetype.Any)
        if err != nil {
                // Mark database as offline
                dbAvailable = false
diff --git a/lxd/daemon.go b/lxd/daemon.go
index 2ad1a2d318..f63529b85c 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -34,6 +34,7 @@ import (
        "github.com/lxc/lxd/lxd/device"
        "github.com/lxc/lxd/lxd/endpoints"
        "github.com/lxc/lxd/lxd/events"
+       "github.com/lxc/lxd/lxd/instance/instancetype"
        "github.com/lxc/lxd/lxd/maas"
        "github.com/lxc/lxd/lxd/node"
        "github.com/lxc/lxd/lxd/rbac"
@@ -1027,7 +1028,7 @@ func (d *Daemon) Ready() error {
 }
 
 func (d *Daemon) numRunningContainers() (int, error) {
-       results, err := instanceLoadNodeAll(d.State())
+       results, err := instanceLoadNodeAll(d.State(), instancetype.Container)
        if err != nil {
                return 0, err
        }
diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 271bb58599..dd9b8e4644 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -533,20 +533,6 @@ func (c *ClusterTx) ContainerNodeMove(project, oldName, newName, newNode string)
        return nil
 }
 
-// ContainerNodeList returns all container objects on the local node.
-func (c *ClusterTx) ContainerNodeList() ([]Instance, error) {
-       node, err := c.NodeName()
-       if err != nil {
-               return nil, errors.Wrap(err, "Local node name")
-       }
-       filter := InstanceFilter{
-               Node: node,
-               Type: instancetype.Container,
-       }
-
-       return c.InstanceList(filter)
-}
-
 // ContainerNodeProjectList returns all container objects on the local node within the given project.
 func (c *ClusterTx) ContainerNodeProjectList(project string, instanceType instancetype.Type) ([]Instance, error) {
        node, err := c.NodeName()
diff --git a/lxd/db/containers_test.go b/lxd/db/containers_test.go
index 168273e5c5..3c3c594e9c 100644
--- a/lxd/db/containers_test.go
+++ b/lxd/db/containers_test.go
@@ -406,7 +406,7 @@ func TestContainersNodeList(t *testing.T) {
 }
 
 // All containers on a node are loaded in bulk.
-func TestContainerNodeList(t *testing.T) {
+func TestContainerNodeProjectList(t *testing.T) {
        tx, cleanup := db.NewTestClusterTx(t)
        defer cleanup()
 
@@ -427,7 +427,7 @@ func TestContainerNodeList(t *testing.T) {
        addContainerDevice(t, tx, "c2", "eth0", "nic", nil)
        addContainerDevice(t, tx, "c4", "root", "disk", map[string]string{"x": 
"y"})
 
-       containers, err := tx.ContainerNodeList()
+       containers, err := tx.ContainerNodeProjectList("", instancetype.Container)
        require.NoError(t, err)
        assert.Len(t, containers, 3)
 
diff --git a/lxd/devices.go b/lxd/devices.go
index 5939a0c494..f286288331 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -17,6 +17,7 @@ import (
        "github.com/lxc/lxd/lxd/cgroup"
        "github.com/lxc/lxd/lxd/device"
        "github.com/lxc/lxd/lxd/instance"
+       "github.com/lxc/lxd/lxd/instance/instancetype"
        "github.com/lxc/lxd/lxd/state"
        "github.com/lxc/lxd/shared"
        log "github.com/lxc/lxd/shared/log15"
@@ -293,7 +294,7 @@ func deviceTaskBalance(s *state.State) {
        }
 
        // Iterate through the instances
-       instances, err := instanceLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s, instancetype.Container)
        if err != nil {
                logger.Error("Problem loading instances list", log.Ctx{"err": 
err})
                return
@@ -415,7 +416,7 @@ func deviceNetworkPriority(s *state.State, netif string) {
                return
        }
 
-       instances, err := instanceLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s, instancetype.Container)
        if err != nil {
                return
        }
@@ -493,7 +494,7 @@ func deviceEventListener(s *state.State) {
 
 // devicesRegister calls the Register() function on all supported devices so they receive events.
 func devicesRegister(s *state.State) {
-       instances, err := instanceLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s, instancetype.Container)
        if err != nil {
                logger.Error("Problem loading containers list", log.Ctx{"err": 
err})
                return
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index aa113de9c9..7e55e1783b 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -394,7 +394,7 @@ func findContainerForPid(pid int32, s *state.State) (*containerLXC, error) {
                return nil, err
        }
 
-       instances, err := instanceLoadNodeAll(s)
+       instances, err := instanceLoadNodeAll(s, instancetype.Container)
        if err != nil {
                return nil, err
        }
diff --git a/lxd/networks_utils.go b/lxd/networks_utils.go
index aa90ca415e..0f6194edef 100644
--- a/lxd/networks_utils.go
+++ b/lxd/networks_utils.go
@@ -640,7 +640,7 @@ func networkUpdateStatic(s *state.State, networkName string) error {
        }
 
        // Get all the instances
-       insts, err := instanceLoadNodeAll(s)
+       insts, err := instanceLoadNodeAll(s, instancetype.Any)
        if err != nil {
                return err
        }
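
As a rough usage sketch (not part of the patch, and assuming the same main
package as lxd/container.go), the new parameter lets each caller state which
instance kinds it wants; countLocal below is an illustrative helper only:

// countLocal returns how many instances of the given type exist on this node.
func countLocal(s *state.State, t instancetype.Type) (int, error) {
	// t is one of instancetype.Container, instancetype.VM or instancetype.Any.
	insts, err := instanceLoadNodeAll(s, t)
	if err != nil {
		return 0, err
	}

	return len(insts), nil
}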

From 12248eb765af1d8ea751320afce7d49e553453a3 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Tue, 26 Nov 2019 17:49:30 -0500
Subject: [PATCH 02/11] lxd/vm: Tweak default memory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index 62df3470f1..9630aa62a6 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -1078,7 +1078,7 @@ func (vm *vmQemu) addMemoryConfig(sb *strings.Builder) error {
        // Configure memory limit.
        memSize := vm.expandedConfig["limits.memory"]
        if memSize == "" {
-               memSize = "1GB" // Default to 1GB if no memory limit specified.
+               memSize = "1GiB" // Default to 1GiB if no memory limit specified.
        }
 
        memSizeBytes, err := units.ParseByteSizeString(memSize)
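
For context, a self-contained sketch of why the suffix matters with the shared
units helper used above (the byte values are just what the SI and IEC prefixes
imply, not something taken from this patch):

package main

import (
	"fmt"

	"github.com/lxc/lxd/shared/units"
)

func main() {
	gb, _ := units.ParseByteSizeString("1GB")   // decimal suffix: 1000000000 bytes
	gib, _ := units.ParseByteSizeString("1GiB") // binary suffix: 1073741824 bytes
	fmt.Println(gb, gib)
}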

From d104ca2dc79534a1ea5c49b26df94f41c4da48cb Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:39:16 -0500
Subject: [PATCH 03/11] lxd/vm: Add a virtio graphics card
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index 9630aa62a6..caea8a882b 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -990,6 +990,10 @@ driver = "virtio-scsi-pci"
 bus = "qemu_pcie1"
 addr = "0x0"
 
+# Graphics card
+[device]
+driver = "virtio-gpu"
+
 # Balloon driver
 [device "qemu_pcie2"]
 driver = "pcie-root-port"

From 4e566ade035386e43753940ad2e3e759e18d0dbc Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Tue, 26 Nov 2019 17:49:44 -0500
Subject: [PATCH 04/11] lxd/vm: Add ringbuffer on vserial
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index caea8a882b..22c6e93c73 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -975,6 +975,11 @@ driver = "virtio-serial"
 [device]
 driver = "virtserialport"
 name = "org.linuxcontainers.lxd"
+chardev = "vserial"
+
+[chardev "vserial"]
+backend = "ringbuf"
+size = "16B"
 
 # PCIe root
 [device "qemu_pcie1"]

From 8bb00de3e45bbefafabb7736a309f59f44ca56da Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 01:57:03 -0500
Subject: [PATCH 05/11] lxd-agent: Add vserial state notification
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd-agent/main_agent.go | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/lxd-agent/main_agent.go b/lxd-agent/main_agent.go
index 5bc25505ff..b93abf837e 100644
--- a/lxd-agent/main_agent.go
+++ b/lxd-agent/main_agent.go
@@ -2,10 +2,12 @@ package main
 
 import (
        "os"
+       "os/signal"
        "path/filepath"
 
        "github.com/pkg/errors"
        "github.com/spf13/cobra"
+       "golang.org/x/sys/unix"
 
        "github.com/lxc/lxd/lxd/vsock"
        "github.com/lxc/lxd/shared"
@@ -95,6 +97,25 @@ func (c *cmdAgent) Run(cmd *cobra.Command, args []string) error {
        // Prepare the HTTP server.
        httpServer := restServer(tlsConfig, cert, c.global.flagLogDebug, d)
 
+       // Serial notification.
+       if shared.PathExists("/dev/virtio-ports/org.linuxcontainers.lxd") {
+               vSerial, err := os.OpenFile("/dev/virtio-ports/org.linuxcontainers.lxd", os.O_RDWR, 0600)
+               if err != nil {
+                       return err
+               }
+               defer vSerial.Close()
+
+               vSerial.Write([]byte("STARTED\n"))
+
+               chSignal := make(chan os.Signal, 1)
+               signal.Notify(chSignal, unix.SIGTERM)
+               go func() {
+                       <-chSignal
+                       vSerial.Write([]byte("STOPPED\n"))
+                       os.Exit(0)
+               }()
+       }
+
        // Start the server.
        return httpServer.ServeTLS(networkTLSListener(l, tlsConfig), "agent.crt", "agent.key")
 }
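
The host reads these markers back through QMP's "ringbuf-read" (see the next
patch); a self-contained sketch of the parsing involved, not taken from the
patch itself:

package main

import (
	"fmt"
	"strings"
)

// lastMarker returns the most recent complete status line from the ring
// buffer contents. "STARTED\n" and "STOPPED\n" are 8 bytes each, so the
// 16 byte buffer always holds at least one full marker.
func lastMarker(buf string) string {
	entries := strings.Split(buf, "\n")
	if len(entries) < 2 {
		return "" // No complete marker yet.
	}

	return entries[len(entries)-2]
}

func main() {
	fmt.Println(lastMarker("STARTED\n"))          // STARTED
	fmt.Println(lastMarker("STARTED\nSTOPPED\n")) // STOPPED
}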

From d9175691599ace95fbc0d6373536bb61e635662f Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:38:00 -0500
Subject: [PATCH 06/11] lxd/qmp: Introduce new QMP wrapper
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/qmp/errors.go  |  14 +++
 lxd/qmp/monitor.go | 276 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 290 insertions(+)
 create mode 100644 lxd/qmp/errors.go
 create mode 100644 lxd/qmp/monitor.go

diff --git a/lxd/qmp/errors.go b/lxd/qmp/errors.go
new file mode 100644
index 0000000000..27af7e0120
--- /dev/null
+++ b/lxd/qmp/errors.go
@@ -0,0 +1,14 @@
+package qmp
+
+import (
+       "fmt"
+)
+
+// ErrMonitorDisconnect is returned when interacting with a disconnected Monitor.
+var ErrMonitorDisconnect = fmt.Errorf("Monitor is disconnected")
+
+// ErrMonitorBadReturn is returned when the QMP data cannot be deserialized.
+var ErrMonitorBadReturn = fmt.Errorf("Monitor returned invalid data")
+
+// ErrMonitorBadConsole is returned when the requested console doesn't exist.
+var ErrMonitorBadConsole = fmt.Errorf("Requested console couldn't be found")
diff --git a/lxd/qmp/monitor.go b/lxd/qmp/monitor.go
new file mode 100644
index 0000000000..b854f05d0c
--- /dev/null
+++ b/lxd/qmp/monitor.go
@@ -0,0 +1,276 @@
+package qmp
+
+import (
+       "encoding/json"
+       "fmt"
+       "os"
+       "strings"
+       "sync"
+       "time"
+
+       "github.com/digitalocean/go-qemu/qmp"
+
+       "github.com/lxc/lxd/shared"
+)
+
+var monitors = map[string]*Monitor{}
+var monitorsLock sync.Mutex
+
+// Monitor represents a QMP monitor.
+type Monitor struct {
+       path string
+       qmp  *qmp.SocketMonitor
+
+       agentReady   bool
+       disconnected bool
+       chDisconnect chan struct{}
+       eventHandler func(name string, data map[string]interface{})
+}
+
+// Connect creates or retrieves an existing QMP monitor for the path.
+func Connect(path string, eventHandler func(name string, data map[string]interface{})) (*Monitor, error) {
+       monitorsLock.Lock()
+       defer monitorsLock.Unlock()
+
+       // Look for an existing monitor.
+       monitor, ok := monitors[path]
+       if ok {
+               monitor.eventHandler = eventHandler
+               return monitor, nil
+       }
+
+       // Setup the connection.
+       qmpConn, err := qmp.NewSocketMonitor("unix", path, time.Second)
+       if err != nil {
+               return nil, err
+       }
+
+       err = qmpConn.Connect()
+       if err != nil {
+               return nil, err
+       }
+
+       // Setup the monitor struct.
+       monitor = &Monitor{}
+       monitor.path = path
+       monitor.qmp = qmpConn
+       monitor.chDisconnect = make(chan struct{}, 1)
+       monitor.eventHandler = eventHandler
+
+       // Spawn goroutines.
+       err = monitor.run()
+       if err != nil {
+               return nil, err
+       }
+
+       // Register in global map.
+       monitors[path] = monitor
+
+       return monitor, nil
+}
+
+func (m *Monitor) run() error {
+       // Start ringbuffer monitoring go routine.
+       go func() {
+               for {
+                       // Read the ringbuffer.
+                       resp, err := m.qmp.Run([]byte(`{"execute": "ringbuf-read", "arguments": {"device": "vserial", "size": 16, "format": "utf8"}}`))
+                       if err != nil {
+                               m.Disconnect()
+                               return
+                       }
+
+                       // Decode the response.
+                       var respDecoded struct {
+                               Return string `json:"return"`
+                       }
+
+                       err = json.Unmarshal(resp, &respDecoded)
+                       if err != nil {
+                               continue
+                       }
+
+                       // Extract the last entry.
+                       entries := strings.Split(respDecoded.Return, "\n")
+                       if len(entries) > 1 {
+                               status := entries[len(entries)-2]
+
+                               if status == "STARTED" {
+                                       m.agentReady = true
+                               } else if status == "STOPPED" {
+                                       m.agentReady = false
+                               }
+                       }
+
+                       // Wait until next read or cancel.
+                       select {
+                       case <-m.chDisconnect:
+                               return
+                       case <-time.After(10 * time.Second):
+                               continue
+                       }
+               }
+       }()
+
+       // Start event monitoring go routine.
+       chEvents, err := m.qmp.Events()
+       if err != nil {
+               return err
+       }
+
+       go func() {
+               for {
+                       select {
+                       case <-m.chDisconnect:
+                               return
+                       case e := <-chEvents:
+                               if e.Event == "" {
+                                       continue
+                               }
+
+                               if m.eventHandler != nil {
+                                       m.eventHandler(e.Event, e.Data)
+                               }
+                       }
+               }
+       }()
+
+       return nil
+}
+
+// Wait returns a channel that will be closed on disconnection.
+func (m *Monitor) Wait() (chan struct{}, error) {
+       // Check if disconnected
+       if m.disconnected {
+               return nil, ErrMonitorDisconnect
+       }
+
+       return m.chDisconnect, nil
+}
+
+// Disconnect forces a disconnection from QEMU.
+func (m *Monitor) Disconnect() {
+       // Stop all go routines and disconnect from socket.
+       close(m.chDisconnect)
+       m.disconnected = true
+       m.qmp.Disconnect()
+
+       // Remove from the map.
+       monitorsLock.Lock()
+       defer monitorsLock.Unlock()
+       delete(monitors, m.path)
+}
+
+// Status returns the current VM status.
+func (m *Monitor) Status() (string, error) {
+       // Check if disconnected
+       if m.disconnected {
+               return "", ErrMonitorDisconnect
+       }
+
+       // Query the status.
+       respRaw, err := m.qmp.Run([]byte("{'execute': 'query-status'}"))
+       if err != nil {
+               m.Disconnect()
+               return "", ErrMonitorDisconnect
+       }
+
+       // Process the response.
+       var respDecoded struct {
+               Return struct {
+                       Status string `json:"status"`
+               } `json:"return"`
+       }
+
+       err = json.Unmarshal(respRaw, &respDecoded)
+       if err != nil {
+               return "", ErrMonitorBadReturn
+       }
+
+       return respDecoded.Return.Status, nil
+}
+
+// Console fetches the File for a particular console.
+func (m *Monitor) Console(target string) (*os.File, error) {
+       // Check if disconnected
+       if m.disconnected {
+               return nil, ErrMonitorDisconnect
+       }
+
+       // Query the consoles.
+       respRaw, err := m.qmp.Run([]byte("{'execute': 'query-chardev'}"))
+       if err != nil {
+               m.Disconnect()
+               return nil, ErrMonitorDisconnect
+       }
+
+       // Process the response.
+       var respDecoded struct {
+               Return []struct {
+                       Label    string `json:"label"`
+                       Filename string `json:"filename"`
+               } `json:"return"`
+       }
+
+       err = json.Unmarshal(respRaw, &respDecoded)
+       if err != nil {
+               return nil, ErrMonitorBadReturn
+       }
+
+       // Look for the requested console.
+       for _, v := range respDecoded.Return {
+               if v.Label == target {
+                       ptsPath := strings.TrimPrefix(v.Filename, "pty:")
+
+                       if !shared.PathExists(ptsPath) {
+                               continue
+                       }
+
+                       // Open the PTS device
+                       console, err := os.OpenFile(ptsPath, os.O_RDWR, 0600)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return console, nil
+               }
+       }
+
+       return nil, ErrMonitorBadConsole
+}
+
+func (m *Monitor) runCmd(cmd string) error {
+       // Check if disconnected
+       if m.disconnected {
+               return ErrMonitorDisconnect
+       }
+
+       // Query the status.
+       _, err := m.qmp.Run([]byte(fmt.Sprintf("{'execute': '%s'}", cmd)))
+       if err != nil {
+               m.Disconnect()
+               return ErrMonitorDisconnect
+       }
+
+       return nil
+}
+
+// Powerdown tells the VM to gracefully shutdown.
+func (m *Monitor) Powerdown() error {
+       return m.runCmd("system_powerdown")
+}
+
+// Start tells QEMU to start the emulation.
+func (m *Monitor) Start() error {
+       return m.runCmd("cont")
+}
+
+// Quit tells QEMU to exit immediately.
+func (m *Monitor) Quit() error {
+       return m.runCmd("quit")
+}
+
+// AgentReady indicates whether an agent has been detected.
+func (m *Monitor) AgentReady() bool {
+       return m.agentReady
+}
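
A rough usage sketch of the new package (method names are the ones defined
above; the monitor path and event handler are illustrative only):

package main

import (
	"fmt"

	"github.com/lxc/lxd/lxd/qmp"
)

func powerdown(monitorPath string) error {
	// Connect (or reuse the cached monitor) and log any QMP events.
	monitor, err := qmp.Connect(monitorPath, func(event string, data map[string]interface{}) {
		fmt.Println("QMP event:", event)
	})
	if err != nil {
		return err // Most likely the VM isn't running.
	}

	// Grab the disconnection channel before requesting the shutdown.
	chDisconnect, err := monitor.Wait()
	if err != nil {
		return err
	}

	// Ask the guest to power down, then block until QEMU goes away.
	err = monitor.Powerdown()
	if err != nil {
		return err
	}

	<-chDisconnect

	return nil
}

func main() {
	if err := powerdown("/path/to/qemu.monitor"); err != nil {
		fmt.Println(err)
	}
}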

From 3f642043c47743a68ed54b7336661b9f17b49981 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:38:27 -0500
Subject: [PATCH 07/11] tests: Add lxd/qmp to golint
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 test/suites/static_analysis.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/suites/static_analysis.sh b/test/suites/static_analysis.sh
index a65f5d62c8..b5edb6d6ea 100644
--- a/test/suites/static_analysis.sh
+++ b/test/suites/static_analysis.sh
@@ -86,6 +86,7 @@ test_static_analysis() {
       #golint -set_exit_status lxd/migration
       golint -set_exit_status lxd/node
       golint -set_exit_status lxd/operations
+      golint -set_exit_status lxd/qmp
       golint -set_exit_status lxd/response
       golint -set_exit_status lxd/state
       golint -set_exit_status lxd/storage/...

From ae25c56450fbadd16fcb8a1cc9fa34f954adf5ad Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:43:11 -0500
Subject: [PATCH 08/11] lxd/vm: Port to new qmp package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 209 +++++++++++++++++++++----------------------------
 1 file changed, 88 insertions(+), 121 deletions(-)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index 22c6e93c73..86e214f320 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -16,7 +16,6 @@ import (
        "sync"
        "time"
 
-       "github.com/digitalocean/go-qemu/qmp"
        "github.com/gorilla/websocket"
        "github.com/pborman/uuid"
        "github.com/pkg/errors"
@@ -34,6 +33,7 @@ import (
        "github.com/lxc/lxd/lxd/maas"
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/project"
+       "github.com/lxc/lxd/lxd/qmp"
        "github.com/lxc/lxd/lxd/state"
        storagePools "github.com/lxc/lxd/lxd/storage"
        storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
@@ -314,6 +314,37 @@ func (vm *vmQemu) getStoragePool() (storagePools.Pool, error) {
        return vm.storagePool, nil
 }
 
+func (vm *vmQemu) eventHandler() func(event string, data map[string]interface{}) {
+       id := vm.id
+       state := vm.state
+
+       return func(event string, data map[string]interface{}) {
+               if !shared.StringInSlice(event, []string{"SHUTDOWN"}) {
+                       return
+               }
+
+               inst, err := instanceLoadById(state, id)
+               if err != nil {
+                       logger.Errorf("Failed to load instance with id=%d", id)
+                       return
+               }
+
+               if event == "SHUTDOWN" {
+                       target := "stop"
+                       entry, ok := data["reason"]
+                       if ok && entry == "guest-reset" {
+                               target = "reboot"
+                       }
+
+                       err = inst.(*vmQemu).OnStop(target)
+                       if err != nil {
+                               logger.Errorf("Failed to cleanly stop instance '%s': %v", project.Prefix(inst.Project(), inst.Name()), err)
+                               return
+                       }
+               }
+       }
+}
+
 // mount mounts the instance's config volume if needed.
 func (vm *vmQemu) mount() (ourMount bool, err error) {
        var pool storagePools.Pool
@@ -402,62 +433,62 @@ func (vm *vmQemu) Freeze() error {
        return nil
 }
 
+func (vm *vmQemu) OnStop(target string) error {
+       vm.cleanupDevices()
+       os.Remove(vm.pidFilePath())
+       os.Remove(vm.getMonitorPath())
+       vm.unmount()
+
+       if target == "reboot" {
+               return vm.Start(false)
+       }
+
+       return nil
+}
+
 func (vm *vmQemu) Shutdown(timeout time.Duration) error {
        if !vm.IsRunning() {
                return fmt.Errorf("The instance is already stopped")
        }
 
        // Connect to the monitor.
-       monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
        if err != nil {
                return err
        }
 
-       err = monitor.Connect()
+       // Get the wait channel.
+       chDisconnect, err := monitor.Wait()
        if err != nil {
+               if err == qmp.ErrMonitorDisconnect {
+                       return nil
+               }
+
                return err
        }
-       defer monitor.Disconnect()
 
        // Send the system_powerdown command.
-       _, err = monitor.Run([]byte("{'execute': 'system_powerdown'}"))
+       err = monitor.Powerdown()
        if err != nil {
+               if err == qmp.ErrMonitorDisconnect {
+                       return nil
+               }
+
                return err
        }
-       monitor.Disconnect()
-
-       // Deal with the timeout.
-       chShutdown := make(chan struct{}, 1)
-       go func() {
-               for {
-                       // Connect to socket, check if still running, then disconnect so we don't
-                       // block the qemu monitor socket for other users (such as lxc list).
-                       if !vm.IsRunning() {
-                               close(chShutdown)
-                               return
-                       }
-
-                       time.Sleep(500 * time.Millisecond) // Don't consume too many resources.
-               }
-       }()
 
        // If timeout provided, block until the VM is not running or the timeout has elapsed.
        if timeout > 0 {
                select {
-               case <-chShutdown:
+               case <-chDisconnect:
                        return nil
                case <-time.After(timeout):
                        return fmt.Errorf("Instance was not shutdown after 
timeout")
                }
        } else {
-               <-chShutdown // Block until VM is not running if no timeout provided.
+               <-chDisconnect // Block until VM is not running if no timeout provided.
        }
 
-       vm.cleanupDevices()
-       os.Remove(vm.pidFilePath())
-       os.Remove(vm.getMonitorPath())
-       vm.unmount()
-
        return nil
 }
 
@@ -1353,49 +1384,34 @@ func (vm *vmQemu) Stop(stateful bool) error {
        }
 
        // Connect to the monitor.
-       monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
        if err != nil {
-               return err
+               // If we fail to connect, it's most likely because the VM is 
already off.
+               return nil
        }
 
-       err = monitor.Connect()
+       // Get the wait channel.
+       chDisconnect, err := monitor.Wait()
        if err != nil {
-               return err
-       }
-       defer monitor.Disconnect()
+               if err == qmp.ErrMonitorDisconnect {
+                       return nil
+               }
 
-       // Send the quit command.
-       _, err = monitor.Run([]byte("{'execute': 'quit'}"))
-       if err != nil {
                return err
        }
-       monitor.Disconnect()
 
-       pid, err := vm.pid()
+       // Send the quit command.
+       err = monitor.Quit()
        if err != nil {
-               return err
-       }
-
-       // No PID found, qemu not running.
-       if pid < 0 {
-               return nil
-       }
-
-       // Check if qemu process still running, if so wait.
-       for {
-               procPath := fmt.Sprintf("/proc/%d", pid)
-               if shared.PathExists(procPath) {
-                       time.Sleep(500 * time.Millisecond)
-                       continue
+               if err == qmp.ErrMonitorDisconnect {
+                       return nil
                }
 
-               break
+               return err
        }
 
-       vm.cleanupDevices()
-       os.Remove(vm.pidFilePath())
-       os.Remove(vm.getMonitorPath())
-       vm.unmount()
+       // Wait for QEMU to exit (can take a while if pending I/O).
+       <-chDisconnect
 
        return nil
 }
@@ -2265,56 +2281,23 @@ func (vm *vmQemu) Console() (*os.File, chan error, error) {
        vmConsoleLock.Unlock()
 
        // Connect to the monitor.
-       monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
        if err != nil {
                return nil, nil, err // The VM isn't running as no monitor socket available.
        }
 
-       err = monitor.Connect()
-       if err != nil {
-               return nil, nil, err // The capabilities handshake failed.
-       }
-       defer monitor.Disconnect()
-
-       // Send the status command.
-       respRaw, err := monitor.Run([]byte("{'execute': 'query-chardev'}"))
-       if err != nil {
-               return nil, nil, err // Status command failed.
-       }
-
-       var respDecoded struct {
-               Return []struct {
-                       Label    string `json:"label"`
-                       Filename string `json:"filename"`
-               } `json:"return"`
-       }
-
-       err = json.Unmarshal(respRaw, &respDecoded)
-       if err != nil {
-               return nil, nil, err // JSON decode failed.
-       }
-
-       var ptsPath string
-
-       for _, v := range respDecoded.Return {
-               if v.Label == "console" {
-                       ptsPath = strings.TrimPrefix(v.Filename, "pty:")
-               }
-       }
-
-       if ptsPath == "" {
-               return nil, nil, fmt.Errorf("No PTS path found")
-       }
-
-       console, err := os.OpenFile(ptsPath, os.O_RDWR, 0600)
+       // Get the console.
+       console, err := monitor.Console("console")
        if err != nil {
                return nil, nil, err
        }
 
+       // Record the console is in use.
        vmConsoleLock.Lock()
        vmConsole[vm.id] = true
        vmConsoleLock.Unlock()
 
+       // Handle console disconnection.
        go func() {
                <-chDisconnect
 
@@ -2791,38 +2774,22 @@ func (vm *vmQemu) InitPID() int {
 
 func (vm *vmQemu) statusCode() api.StatusCode {
        // Connect to the monitor.
-       monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
-       if err != nil {
-               return api.Stopped // The VM isn't running as no monitor socket available.
-       }
-
-       err = monitor.Connect()
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
        if err != nil {
-               return api.Error // The capabilities handshake failed.
+               // If we fail to connect, chances are the VM isn't running.
+               return api.Stopped
        }
-       defer monitor.Disconnect()
 
-       // Send the status command.
-       respRaw, err := monitor.Run([]byte("{'execute': 'query-status'}"))
+       status, err := monitor.Status()
        if err != nil {
-               return api.Error // Status command failed.
-       }
-
-       var respDecoded struct {
-               ID     string `json:"id"`
-               Return struct {
-                       Running    bool   `json:"running"`
-                       Singlestep bool   `json:"singlestep"`
-                       Status     string `json:"status"`
-               } `json:"return"`
-       }
+               if err == qmp.ErrMonitorDisconnect {
+                       return api.Stopped
+               }
 
-       err = json.Unmarshal(respRaw, &respDecoded)
-       if err != nil {
-               return api.Error // JSON decode failed.
+               return api.Error
        }
 
-       if respDecoded.Return.Status == "running" {
+       if status == "running" {
                return api.Running
        }
 

From 585dbb7dd1051b4b8b22b23a00efbb81ad92d8ec Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:43:40 -0500
Subject: [PATCH 09/11] lxd/vm: Don't start or reboot the VM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead use QMP to start the emulation and use the event handler to
handle reboots.

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index 86e214f320..727e90e282 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -588,6 +588,7 @@ func (vm *vmQemu) Start(stateful bool) error {
        }
 
        args := []string{
+               "-S",
                "-name", vm.Name(),
                "-uuid", vmUUID,
                "-daemonize",
@@ -595,6 +596,7 @@ func (vm *vmQemu) Start(stateful bool) error {
                "-nographic",
                "-serial", "chardev:console",
                "-nodefaults",
+               "-no-reboot",
                "-readconfig", confFile,
                "-pidfile", vm.pidFilePath(),
        }
@@ -612,6 +614,18 @@ func (vm *vmQemu) Start(stateful bool) error {
                return err
        }
 
+       // Start QMP monitoring.
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
+       if err != nil {
+               return err
+       }
+
+       // Start the VM.
+       err = monitor.Start()
+       if err != nil {
+               return err
+       }
+
        return nil
 }
 

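Combined with the event handler from patch 08, the flow is: QEMU starts paused
(-S), monitor.Start() issues "cont", and -no-reboot turns a guest reset into a
QEMU exit that LXD observes as a SHUTDOWN event. A sketch of that decision
(not the actual vm_qemu code; restartVM and cleanupVM are placeholders for the
OnStop("reboot") and OnStop("stop") paths):

package main

import "fmt"

func restartVM() { fmt.Println("restarting VM") }
func cleanupVM() { fmt.Println("cleaning up VM") }

// newShutdownHandler interprets the SHUTDOWN event now that QEMU runs with
// -no-reboot: the event's reason field tells LXD whether the guest asked for
// a reboot or a plain poweroff.
func newShutdownHandler() func(event string, data map[string]interface{}) {
	return func(event string, data map[string]interface{}) {
		if event != "SHUTDOWN" {
			return
		}

		if reason, ok := data["reason"]; ok && reason == "guest-reset" {
			restartVM()
			return
		}

		cleanupVM()
	}
}

func main() {
	handler := newShutdownHandler()
	handler("SHUTDOWN", map[string]interface{}{"reason": "guest-reset"}) // restarting VM
}
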
From fa0fbc486138a2aeb44dc77302bcb7fb85a6e465 Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Thu, 28 Nov 2019 23:44:17 -0500
Subject: [PATCH 10/11] lxd/vm: Use agent detection from QMP
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/vm_qemu.go | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
index 727e90e282..a8d3a8a54b 100644
--- a/lxd/vm_qemu.go
+++ b/lxd/vm_qemu.go
@@ -48,6 +48,8 @@ import (
        "github.com/lxc/lxd/shared/units"
 )
 
+var vmQemuAgentOfflineErr = fmt.Errorf("LXD VM agent isn't currently running")
+
 var vmVsockTimeout time.Duration = time.Second
 
 var vmConsole = map[int]bool{}
@@ -2564,10 +2566,13 @@ func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
        if statusCode == api.Running {
                status, err := vm.agentGetState()
                if err != nil {
-                       logger.Warn("Could not get VM state from agent", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+                       if err != vmQemuAgentOfflineErr {
+                               logger.Warn("Could not get VM state from agent", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
+                       }
+
+                       // Fallback data.
                        status = &api.InstanceState{}
                        status.Processes = -1
-
                        networks := map[string]api.InstanceStateNetwork{}
                        for k, m := range vm.ExpandedDevices() {
                                // We only care about nics.
@@ -2643,12 +2648,16 @@ func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
 // agentGetState connects to the agent inside of the VM and does
 // an API call to get the current state.
 func (vm *vmQemu) agentGetState() (*api.InstanceState, error) {
-       // Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
-       err := util.LoadModule("vhost_vsock")
+       // Check if the agent is running.
+       monitor, err := qmp.Connect(vm.getMonitorPath(), vm.eventHandler())
        if err != nil {
                return nil, err
        }
 
+       if !monitor.AgentReady() {
+               return nil, vmQemuAgentOfflineErr
+       }
+
        client, err := vm.getAgentClient()
        if err != nil {
                return nil, err

From b22d838bf44027ad43dd56a96858edf53b071b9b Mon Sep 17 00:00:00 2001
From: Stéphane Graber <stgra...@ubuntu.com>
Date: Fri, 29 Nov 2019 00:09:34 -0500
Subject: [PATCH 11/11] lxd/vm: Restart monitor on startup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/containers.go | 15 +++++++++++++++
 lxd/daemon.go     |  3 +++
 2 files changed, 18 insertions(+)

diff --git a/lxd/containers.go b/lxd/containers.go
index 0b5b90b627..67a899ef57 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -248,6 +248,21 @@ func containersRestart(s *state.State) error {
        return nil
 }
 
+func vmMonitor(s *state.State) error {
+       // Get all the instances
+       insts, err := instanceLoadNodeAll(s, instancetype.VM)
+       if err != nil {
+               return err
+       }
+
+       for _, inst := range insts {
+               // Retrieve running state, this will re-connect to QMP
+               inst.IsRunning()
+       }
+
+       return nil
+}
+
 type containerStopList []instance.Instance
 
 func (slice containerStopList) Len() int {
diff --git a/lxd/daemon.go b/lxd/daemon.go
index f63529b85c..a1d09b2b24 100644
--- a/lxd/daemon.go
+++ b/lxd/daemon.go
@@ -1018,6 +1018,9 @@ func (d *Daemon) Ready() error {
        // Restore containers
        containersRestart(s)
 
+       // Start monitoring VMs again
+       vmMonitor(s)
+
        // Re-balance in case things changed while LXD was down
        deviceTaskBalance(s)
 