RE: [libvirt][PATCH RESEND v10 1/5] qemu: provide support to query the SGX capability

2022-02-22 Thread Huang, Haibin
I accept all comments, but I didn't get your point on this one.
> > +static void
> > +virQEMUCapsFormatSGXInfo(virQEMUCaps *qemuCaps, virBuffer *buf) {
> > +    virSGXCapabilityPtr sgx = virQEMUCapsGetSGXCapabilities(qemuCaps);
> > +
> > +    virBufferAddLit(buf, "<sgx supported='yes'>\n");
> > +    virBufferAdjustIndent(buf, 2);
> > +    virBufferAsprintf(buf, "<flc>%s</flc>\n", sgx->flc ? "yes" : "no");
> 
> ... which is in contradiction with the way we format it.
[Haibin] Sorry, I don't get your point. Can you give me the details?

Do you mean changing virBufferAsprintf(buf, "<flc>%s</flc>\n", sgx->flc ?
"yes" : "no"); to virBufferAsprintf(buf, "<flc>%u</flc>\n", sgx->flc);?

If yes: I see that cpu->maximum is also formatted like this:
   virBufferAsprintf(buf, "<...>%s</...>\n", cpu->maximum ? "yes" : "no");



> -Original Message-
> From: Michal Prívozník 
> Sent: Wednesday, February 16, 2022 6:26 PM
> To: Huang, Haibin ; libvir-list@redhat.com;
> berra...@redhat.com; Ding, Jian-feng ; Yang,
> Lin A ; Lu, Lianhao 
> Subject: Re: [libvirt][PATCH RESEND v10 1/5] qemu: provide support to query
> the SGX capability
> 
> On 2/8/22 06:21, Haibin Huang wrote:
> > QEMU version >= 6.2.0 provides support for creating enclaves on the
> > SGX x86 platform using the Software Guard Extensions (SGX) feature.
> > This patch adds support for querying the SGX capability from QEMU.
> >
> > Signed-off-by: Haibin Huang 
> > ---
> >  src/conf/domain_capabilities.c|  10 ++
> >  src/conf/domain_capabilities.h|  13 ++
> >  src/libvirt_private.syms  |   1 +
> >  src/qemu/qemu_capabilities.c  | 113 ++
> >  src/qemu/qemu_capabilities.h  |   4 +
> >  src/qemu/qemu_capspriv.h  |   4 +
> >  src/qemu/qemu_monitor.c   |  10 ++
> >  src/qemu/qemu_monitor.h   |   3 +
> >  src/qemu/qemu_monitor_json.c  |  72 +++
> >  src/qemu/qemu_monitor_json.h  |   9 ++
> >  .../caps_6.2.0.x86_64.replies |  22 +++-
> >  .../caps_6.2.0.x86_64.xml |   5 +
> >  .../caps_7.0.0.x86_64.replies |  22 +++-
> >  .../caps_7.0.0.x86_64.xml |   5 +
> >  14 files changed, 285 insertions(+), 8 deletions(-)
> >
> > diff --git a/src/conf/domain_capabilities.c
> > b/src/conf/domain_capabilities.c index c394a7a390..1170fd26df 100644
> > --- a/src/conf/domain_capabilities.c
> > +++ b/src/conf/domain_capabilities.c
> > @@ -78,6 +78,16 @@ virSEVCapabilitiesFree(virSEVCapability *cap)  }
> >
> >
> > +void
> > +virSGXCapabilitiesFree(virSGXCapability *cap) {
> > +if (!cap)
> > +return;
> > +
> > +VIR_FREE(cap);
> > +}
> > +
> > +
> >  static void
> >  virDomainCapsDispose(void *obj)
> >  {
> > diff --git a/src/conf/domain_capabilities.h
> > b/src/conf/domain_capabilities.h index 1d2f4ac7a5..973d543ce8 100644
> > --- a/src/conf/domain_capabilities.h
> > +++ b/src/conf/domain_capabilities.h
> > @@ -191,6 +191,13 @@ struct _virSEVCapability {
> >  unsigned int max_es_guests;
> >  };
> >
> > +typedef struct _virSGXCapability virSGXCapability; typedef
> > +virSGXCapability *virSGXCapabilityPtr; struct _virSGXCapability {
> > +bool flc;
> > +unsigned int epc_size;
> > +};
> > +
> >  typedef enum {
> >  VIR_DOMAIN_CAPS_FEATURE_IOTHREADS = 0,
> >  VIR_DOMAIN_CAPS_FEATURE_VMCOREINFO,
> > @@ -227,6 +234,7 @@ struct _virDomainCaps {
> >
> >  virDomainCapsFeatureGIC gic;
> >  virSEVCapability *sev;
> > +virSGXCapability *sgx;
> >  /* add new domain features here */
> >
> >  virTristateBool features[VIR_DOMAIN_CAPS_FEATURE_LAST];
> > @@ -275,3 +283,8 @@ void
> >  virSEVCapabilitiesFree(virSEVCapability *capabilities);
> >
> >  G_DEFINE_AUTOPTR_CLEANUP_FUNC(virSEVCapability,
> > virSEVCapabilitiesFree);
> > +
> > +void
> > +virSGXCapabilitiesFree(virSGXCapability *capabilities);
> > +
> > +G_DEFINE_AUTOPTR_CLEANUP_FUNC(virSGXCapability,
> > +virSGXCapabilitiesFree);
> > diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index
> > ba3462d849..0e74e4f20e 100644
> > --- a/src/libvirt_private.syms
> > +++ b/src/libvirt_private.syms
> > @@ -219,6 +219,7 @@ virDomainCapsEnumSet;  virDomainCapsFormat;
> > virDomainCapsNew;  virSEVCapabilitiesFree;
> > +virSGXCapabilitiesFree;
> >
> >
> >  # conf/domain_conf.h
> > diff --git a/src/qemu/qemu_capabilities.c
> > b/src/qemu/qemu_capabilities.c index 1b28c3f161..0e43dd2466 100644
> > --- a/src/qemu/qemu_capabilities.c
> > +++ b/src/qemu/qemu_capabilities.c
> > @@ -663,6 +663,7 @@ VIR_ENUM_IMPL(virQEMUCaps,
> >"device.json+hotplug", /* QEMU_CAPS_DEVICE_JSON */
> >"hvf", /* QEMU_CAPS_HVF */
> >"virtio-mem-pci.prealloc", /*
> > QEMU_CAPS_DEVICE_VIRTIO_MEM_PCI_PREALLOC */
> > +  "sgx-epc", /* QEMU_CAPS_SGX_EPC */
> >  );
> >
> >
> > @@ -744,6 +745,8 @@ struct _virQEMUCaps {
> >
> >  virSEVCapability *sevCapabilities;
>

Re: [PATCH v2 14/29] formatdomain.rst: add 'index' semantics for PowerNV domains

2022-02-22 Thread Daniel Henrique Barboza




On 2/21/22 10:21, Ján Tomko wrote:

On a Tuesday in 2022, Daniel Henrique Barboza wrote:

We're going to use the 'targetIndex' element for PowerNV PHBs. Clarify
that the same attribute will have a different meaning in this context.

Signed-off-by: Daniel Henrique Barboza 
---
docs/formatdomain.rst | 6 --
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst
index 1e44d9a987..d700049c1c 100644
--- a/docs/formatdomain.rst
+++ b/docs/formatdomain.rst
@@ -3896,8 +3896,10 @@ generated by libvirt. :since:`Since 1.2.19 (QEMU only).`
   the user of the libvirt API to attach host devices to the correct
   pci-expander-bus when assigning them to the domain).
``index``
-   pci-root controllers for pSeries guests use this attribute to record the
-   order they will show up in the guest. :since:`Since 3.6.0`
+   pci-root controllers for ``pSeries`` guests use this attribute to record the
+   order they will show up in the guest (:since:`Since 3.6.0`). :since:`Since 8.1.0`,
+   ``powernv`` domains uses this attribute to indicate the chip/socket slot a
+   pcie-root controller will use.


The clarification did not help me. I see no difference between this
description and the one for the chip-id attribute



Changed the description to:


``index``
   pci-root controllers for ``pSeries`` guests use this attribute to record the
   order they will show up in the guest (:since:`Since 3.6.0`). :since:`Since 8.1.0`,
   ``powernv`` domains use this attribute to indicate which slot inside the
   chip the pcie-root controller will use.



Thanks,


Daniel





Jano


``chip-id``
   pcie-root controllers for ``powernv`` domains use this attribute to indicate
   the chip that will own the controller. A chip is equivalent to a CPU socket.
--
2.34.1





Re: [libvirt PATCH 4/4] gitlab-ci: Introduce a new test 'integration' pipeline stage

2022-02-22 Thread Andrea Bolognani
On Tue, Feb 22, 2022 at 03:19:58PM +0100, Erik Skultety wrote:
> > Note that I have adjusted the value of log_filters to match what is
> > recommended in
> >
> >   https://libvirt.org/kbase/debuglogs.html#less-verbose-logging-for-qemu-vms
> >
> > but maybe there's a reason you had picked a different set of filters.
>
> I didn't even know we document this, so I always use the filters I empirically
> settled with. Given the general feeling about warnings usefulness in libvirt
> logs I either use 1 for debug logs or 4 for errors.
>
> If you feel strongly that I should use what we recommend on that page, I'll
> go with that, but I'll add '4:util.threadjob' as well, as threadjobs are also
> verbose and don't add any value.

Maybe change the page so 4:util.threadjob is included in the
recommended configuration? Other than that, if you feel that your set
of filters will work best for the situation at hand you don't need to
change them.

> > > +libvirt-perl-bindings:
> > > +  stage: bindings
> > > +  trigger:
> > > +project: eskultety/libvirt-perl
> > > +branch: multi-project-ci
> > > +strategy: depend
> > > +
> > > +
> > > +centos-stream-8-tests:
> > > +  extends: .tests
> > > +  needs:
> > > +- libvirt-perl-bindings
> > > +- pipeline: $PARENT_PIPELINE_ID
> > > +  job: x86_64-centos-stream-8
> > > +- project: eskultety/libvirt-perl
> > > +  job: x86_64-centos-stream-8
> > > +  ref: multi-project-ci
> > > +  artifacts: true
> >
> > IIUC from the documentation and from reading around, using
> > strategy:depend will cause the local job to reflect the status of the
> > triggered pipeline. So far so good.
> >
> > What I am unclear about is, is there any guarantee that using
> > artifact:true with a job from an external project's pipeline will
> > expose the artifacts from the job that was executed as part of the
> > specific pipeline that we've just triggered ourselves as opposed to
> > some other pipeline that might have already been completed in the
> > past or might have completed in the meantime?
>
> Not just by using artifact:true or strategy:depend. The important bit is 
> having
> 'libvirt-perl-bindings' in the job's needs list. Let me explain, if you don't
> put the bindings trigger job to the requirements list of another job
> (centos-stream-8-tests in this case) what will happen is that the trigger job
> will be waiting for the external pipeline to finish, but centos-stream-8-tests
> job would execute basically as soon as the container project builds are
> finished because artifacts:true would download the latest RPM artifacts from 
> an
> earlier build...

No, I got that part. My question was whether

  other-project-pipeline:
trigger:
  project: other-project
  strategy: depend

  our-job:
needs:
  - other-project-pipeline
  - project: other-project
job: other-project-job
artifacts: true

actually guarantees that the instance of other-project-job whose
artifacts are available to our-job is the same one that was started
as part of the pipeline triggered by the other-project-pipeline job.

Looking at the YAML I wouldn't bet on this being the case, but
perhaps I've missed this guarantee being documented somewhere?

> > Taking a step back, why exactly are we triggering a rebuild of
> > libvirt-perl in the first place? Once we change that project's
> > pipeline so that RPMs are published as artifacts, can't we just grab
> > the ones from the latest successful pipeline? Maybe you've already
> > explained why you did things this way and I just missed it!
>
> ...which brings us here. Well, I adopted the mantra that all libvirt-friends
> projects depend on libvirt and given that we need libvirt-perl bindings to 
> test
> upstream, I'd like to always have the latest bindings available to test with
> the current upstream build. The other reason why I did the way you commented 
> on
> is that during development of the proposal many times I had to make changes to
> both libvirt and libvirt-perl in lockstep and it was tremendously frustrating
> to wait for the pipeline to get to the integration stage only to realize that
> the integration job didn't wait for the latest bindings and instead picked up
> the previous latest artifacts which I knew were either faulty or didn't 
> contain
> the necessary changes yet.

Of course that would be annoying when you're making changes to both
projects at the same time, but is that a scenario that we can expect
to be common once the integration tests are in place?

To be clear, I'm not necessarily against the way you're doing things
right now, it's just that it feels like using the artifacts from the
latest successful libvirt-perl pipeline would lower complexity, avoid
burning additional resources and reduce wait times.

If the only downside is having a worse experience when making
changes to the pipeline, and we can expect that to be infrequent
enough, perhaps that's a reasonable tradeoff.

> > > +  variable

Re: [PATCH] qemu: blockjob: Avoid spurious log errors when cancelling a shallow copy with reused images

2022-02-22 Thread Ján Tomko

On a Tuesday in 2022, Peter Krempa wrote:

In case when a user starts a block copy operation with
VIR_DOMAIN_BLOCK_COPY_SHALLOW and VIR_DOMAIN_BLOCK_COPY_REUSE_EXT and
both the reused image and the original disk have a backing image libvirt
specifically does not insert the backing image until after the job is
asked to be completed via virBlockJobAbort with
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT.

This is so that management applications can copy the backing image on
the background.

Now when a user aborts the block job instead of pivoting it, we'd
ignore the fact that we didn't insert the backing image yet, and the
cancellation would result in a 'blockdev-del' of an invalid node name
and thus an 'error' severity entry in the log.

To solve this issue we use the same conditions under which the backing
image addition was avoided, and remove the internal state for it prior
to the call that unplugs the mirror destination.

Reported-by: Kashyap Chamarthy 
Signed-off-by: Peter Krempa 
---
src/qemu/qemu_blockjob.c | 17 +
1 file changed, 17 insertions(+)



Reviewed-by: Ján Tomko 

Jano




[libvirt PATCH v2 3/5] conf: switch nvram parsing to use XML node / property helpers

2022-02-22 Thread Daniel P . Berrangé
Instead of using XPath, parse the <nvram> element using the XML node and
property helpers so the code is consistent with the parsing of
<loader>.

Reviewed-by: Michal Privoznik 
Signed-off-by: Daniel P. Berrangé 
---
 src/conf/domain_conf.c | 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 34fec887a3..1b423298ce 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -18211,21 +18211,30 @@ virDomainDefParseBootLoaderOptions(virDomainDef *def,
xmlXPathContextPtr ctxt)
 {
 xmlNodePtr loader_node = virXPathNode("./os/loader[1]", ctxt);
+xmlNodePtr nvram_node = virXPathNode("./os/nvram[1]", ctxt);
 const bool fwAutoSelect = def->os.firmware != 
VIR_DOMAIN_OS_DEF_FIRMWARE_NONE;
+virDomainLoaderDef *loader;
 
 if (!loader_node)
 return 0;
 
-def->os.loader = g_new0(virDomainLoaderDef, 1);
+def->os.loader = loader = g_new0(virDomainLoaderDef, 1);
 
 if (virDomainLoaderDefParseXML(loader_node,
def->os.loader,
fwAutoSelect) < 0)
 return -1;
 
-def->os.loader->nvram = virXPathString("string(./os/nvram[1])", ctxt);
-if (!fwAutoSelect)
-def->os.loader->nvramTemplate = 
virXPathString("string(./os/nvram[1]/@template)", ctxt);
+if (nvram_node) {
+if (!(loader->nvram = virXMLNodeContentString(nvram_node)))
+return -1;
+
+if (STREQ(loader->nvram, ""))
+VIR_FREE(loader->nvram);
+
+if (!fwAutoSelect)
+def->os.loader->nvramTemplate = virXMLPropString(nvram_node, 
"template");
+}
 
 return 0;
 }
-- 
2.34.1



[libvirt PATCH v2 4/5] conf: move nvram parsing into virDomainLoaderDefParseXML

2022-02-22 Thread Daniel P . Berrangé
The virDomainLoaderDef struct contains fields for both <loader> and
<nvram> elements, so it makes sense to parse them in the same method,
just like we'll format them in the same method.

Reviewed-by: Michal Privoznik 
Signed-off-by: Daniel P. Berrangé 
---
 src/conf/domain_conf.c | 92 ++
 1 file changed, 40 insertions(+), 52 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 1b423298ce..d79af0b6c8 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -17795,29 +17795,51 @@ 
virDomainDefMaybeAddHostdevSCSIcontroller(virDomainDef *def)
 }
 
 static int
-virDomainLoaderDefParseXML(xmlNodePtr node,
-   virDomainLoaderDef *loader,
-   bool fwAutoSelect)
+virDomainLoaderDefParseXML(virDomainDef *def,
+   xmlXPathContextPtr ctxt)
 {
-if (!fwAutoSelect) {
-if (virXMLPropTristateBool(node, "readonly", VIR_XML_PROP_NONE,
-   &loader->readonly) < 0)
-return -1;
+xmlNodePtr loader_node = virXPathNode("./os/loader[1]", ctxt);
+xmlNodePtr nvram_node = virXPathNode("./os/nvram[1]", ctxt);
+const bool fwAutoSelect = def->os.firmware != 
VIR_DOMAIN_OS_DEF_FIRMWARE_NONE;
+virDomainLoaderDef *loader;
+
+if (!loader_node && !nvram_node)
+return 0;
+
+def->os.loader = loader = g_new0(virDomainLoaderDef, 1);
+
+if (loader_node) {
+if (!fwAutoSelect) {
+if (virXMLPropTristateBool(loader_node, "readonly", 
VIR_XML_PROP_NONE,
+   &loader->readonly) < 0)
+return -1;
+
+if (virXMLPropEnum(loader_node, "type", 
virDomainLoaderTypeFromString,
+   VIR_XML_PROP_NONZERO, &loader->type) < 0)
+return -1;
 
-if (virXMLPropEnum(node, "type", virDomainLoaderTypeFromString,
-   VIR_XML_PROP_NONZERO, &loader->type) < 0)
+if (!(loader->path = virXMLNodeContentString(loader_node)))
+return -1;
+
+if (STREQ(loader->path, ""))
+VIR_FREE(loader->path);
+}
+
+if (virXMLPropTristateBool(loader_node, "secure", VIR_XML_PROP_NONE,
+   &loader->secure) < 0)
 return -1;
+}
 
-if (!(loader->path = virXMLNodeContentString(node)))
+if (nvram_node) {
+if (!(loader->nvram = virXMLNodeContentString(nvram_node)))
 return -1;
 
-if (STREQ(loader->path, ""))
-VIR_FREE(loader->path);
-}
+if (STREQ(loader->nvram, ""))
+VIR_FREE(loader->nvram);
 
-if (virXMLPropTristateBool(node, "secure", VIR_XML_PROP_NONE,
-   &loader->secure) < 0)
-return -1;
+if (!fwAutoSelect)
+loader->nvramTemplate = virXMLPropString(nvram_node, "template");
+}
 
 return 0;
 }
@@ -18206,40 +18228,6 @@ virDomainDefParseBootFirmwareOptions(virDomainDef *def,
 }
 
 
-static int
-virDomainDefParseBootLoaderOptions(virDomainDef *def,
-   xmlXPathContextPtr ctxt)
-{
-xmlNodePtr loader_node = virXPathNode("./os/loader[1]", ctxt);
-xmlNodePtr nvram_node = virXPathNode("./os/nvram[1]", ctxt);
-const bool fwAutoSelect = def->os.firmware != 
VIR_DOMAIN_OS_DEF_FIRMWARE_NONE;
-virDomainLoaderDef *loader;
-
-if (!loader_node)
-return 0;
-
-def->os.loader = loader = g_new0(virDomainLoaderDef, 1);
-
-if (virDomainLoaderDefParseXML(loader_node,
-   def->os.loader,
-   fwAutoSelect) < 0)
-return -1;
-
-if (nvram_node) {
-if (!(loader->nvram = virXMLNodeContentString(nvram_node)))
-return -1;
-
-if (STREQ(loader->nvram, ""))
-VIR_FREE(loader->nvram);
-
-if (!fwAutoSelect)
-def->os.loader->nvramTemplate = virXMLPropString(nvram_node, 
"template");
-}
-
-return 0;
-}
-
-
 static int
 virDomainDefParseBootAcpiOptions(virDomainDef *def,
  xmlXPathContextPtr ctxt)
@@ -18304,7 +18292,7 @@ virDomainDefParseBootOptions(virDomainDef *def,
 if (virDomainDefParseBootFirmwareOptions(def, ctxt) < 0)
 return -1;
 
-if (virDomainDefParseBootLoaderOptions(def, ctxt) < 0)
+if (virDomainLoaderDefParseXML(def, ctxt) < 0)
 return -1;
 
 if (virDomainDefParseBootAcpiOptions(def, ctxt) < 0)
@@ -18320,7 +18308,7 @@ virDomainDefParseBootOptions(virDomainDef *def,
 case VIR_DOMAIN_OSTYPE_UML:
 virDomainDefParseBootKernelOptions(def, ctxt);
 
-if (virDomainDefParseBootLoaderOptions(def, ctxt) < 0)
+if (virDomainLoaderDefParseXML(def, ctxt) < 0)
 return -1;
 
 break;
-- 
2.34.1



[libvirt PATCH v2 0/5] Cleanup and test more firmware handling scenarios

2022-02-22 Thread Daniel P . Berrangé
There are a mind-bending number of possible ways to configure the
firmware with/without NVRAM. Only a small portion are tested and
many error scenarios are silently ignored.

This series attempts to get coverage of every possible XML config
scenario and report explicit errors in all invalid configs.

There is an open question on patch 4. Essentially the use of NVRAM
combined with a writable loader executable feels like an accidental
feature in libvirt that hasn't really been thought through. I'd like to
better define expectations here, but there are several possible
strategies and I'm undecided which is best.

Changed in v2:

 - Merged 5 self contained patches already reviewed
 - Moved checks out of post-parse, into validate methods
 - Instead of rejecting R/W loader with NVRAM template
   honour the template in QEMU driver. A R/W loader is
   conceptually relevant if the loader allows guest
   to live flash upgrade itself. Not possible today
   but we shouldn't reject this combo since QEMU allows
   it.

Daniel P. Berrangé (5):
  qemu: fix populating NVRAM vars from template with R/W loader
  tests: don't permit NVRAM path when using firmware auto-select
  conf: switch nvram parsing to use XML node / property helpers
  conf: move nvram parsing into virDomainLoaderDefParseXML
  conf: stop ignoring / with firmware auto-select

 src/conf/domain_conf.c| 67 --
 src/conf/domain_validate.c| 32 +
 src/qemu/qemu_domain.c|  6 +-
 ...-nvram-rw-template-vars.x86_64-latest.args | 41 +++
 .../bios-nvram-rw-template-vars.xml   | 36 ++
 .../bios-nvram-rw-template.x86_64-latest.args | 41 +++
 .../bios-nvram-rw-template.xml| 36 ++
 .../bios-nvram-rw-vars.x86_64-latest.args | 41 +++
 tests/qemuxml2argvdata/bios-nvram-rw-vars.xml | 36 ++
 tests/qemuxml2argvdata/os-firmware-bios.xml   |  1 -
 ...ware-efi-bad-loader-path.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-loader-path.xml   | 67 ++
 ...ware-efi-bad-loader-type.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-loader-type.xml   | 67 ++
 ...mware-efi-bad-nvram-path.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-nvram-path.xml| 68 +++
 ...e-efi-bad-nvram-template.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-nvram-template.xml| 68 +++
 .../os-firmware-efi-secboot.xml   |  1 -
 tests/qemuxml2argvdata/os-firmware-efi.xml|  1 -
 tests/qemuxml2argvtest.c  |  7 ++
 .../os-firmware-bios.x86_64-latest.xml|  1 -
 .../os-firmware-efi-secboot.x86_64-latest.xml |  1 -
 .../os-firmware-efi.x86_64-latest.xml |  1 -
 24 files changed, 578 insertions(+), 45 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/bios-nvram-rw-template-vars.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-template-vars.xml
 create mode 100644 
tests/qemuxml2argvdata/bios-nvram-rw-template.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-template.xml
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-vars.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-vars.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-loader-path.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-loader-path.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-loader-type.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-loader-type.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-template.x86_64-latest.err
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-template.xml

-- 
2.34.1




[libvirt PATCH v2 5/5] conf: stop ignoring / with firmware auto-select

2022-02-22 Thread Daniel P . Berrangé
Currently if the <os> firmware attribute is set then we silently
ignore most of the <loader> and <nvram> element configs. This
changes the code so that we always fully parse the <loader> and
<nvram> but then use a validation method to explicitly reject
invalid combinations.

Reviewed-by: Michal Privoznik 
Signed-off-by: Daniel P. Berrangé 
---
 src/conf/domain_conf.c| 26 +++
 src/conf/domain_validate.c| 24 +++
 ...ware-efi-bad-loader-path.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-loader-path.xml   | 67 ++
 ...ware-efi-bad-loader-type.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-loader-type.xml   | 67 ++
 ...e-efi-bad-nvram-template.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-nvram-template.xml| 68 +++
 tests/qemuxml2argvtest.c  |  3 +
 9 files changed, 243 insertions(+), 15 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-loader-path.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-loader-path.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-loader-type.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-loader-type.xml
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-template.x86_64-latest.err
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-template.xml

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index d79af0b6c8..e8ce0caa85 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -17800,7 +17800,6 @@ virDomainLoaderDefParseXML(virDomainDef *def,
 {
 xmlNodePtr loader_node = virXPathNode("./os/loader[1]", ctxt);
 xmlNodePtr nvram_node = virXPathNode("./os/nvram[1]", ctxt);
-const bool fwAutoSelect = def->os.firmware != 
VIR_DOMAIN_OS_DEF_FIRMWARE_NONE;
 virDomainLoaderDef *loader;
 
 if (!loader_node && !nvram_node)
@@ -17809,21 +17808,19 @@ virDomainLoaderDefParseXML(virDomainDef *def,
 def->os.loader = loader = g_new0(virDomainLoaderDef, 1);
 
 if (loader_node) {
-if (!fwAutoSelect) {
-if (virXMLPropTristateBool(loader_node, "readonly", 
VIR_XML_PROP_NONE,
-   &loader->readonly) < 0)
-return -1;
+if (virXMLPropTristateBool(loader_node, "readonly", VIR_XML_PROP_NONE,
+   &loader->readonly) < 0)
+return -1;
 
-if (virXMLPropEnum(loader_node, "type", 
virDomainLoaderTypeFromString,
-   VIR_XML_PROP_NONZERO, &loader->type) < 0)
-return -1;
+if (virXMLPropEnum(loader_node, "type", virDomainLoaderTypeFromString,
+   VIR_XML_PROP_NONZERO, &loader->type) < 0)
+return -1;
 
-if (!(loader->path = virXMLNodeContentString(loader_node)))
-return -1;
+if (!(loader->path = virXMLNodeContentString(loader_node)))
+return -1;
 
-if (STREQ(loader->path, ""))
-VIR_FREE(loader->path);
-}
+if (STREQ(loader->path, ""))
+VIR_FREE(loader->path);
 
 if (virXMLPropTristateBool(loader_node, "secure", VIR_XML_PROP_NONE,
&loader->secure) < 0)
@@ -17837,8 +17834,7 @@ virDomainLoaderDefParseXML(virDomainDef *def,
 if (STREQ(loader->nvram, ""))
 VIR_FREE(loader->nvram);
 
-if (!fwAutoSelect)
-loader->nvramTemplate = virXMLPropString(nvram_node, "template");
+loader->nvramTemplate = virXMLPropString(nvram_node, "template");
 }
 
 return 0;
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
index 22bfb3b59d..9ccb5d0dc2 100644
--- a/src/conf/domain_validate.c
+++ b/src/conf/domain_validate.c
@@ -1505,6 +1505,30 @@ virDomainDefOSValidate(const virDomainDef *def,
 }
 
 if (def->os.firmware != VIR_DOMAIN_OS_DEF_FIRMWARE_NONE) {
+if (def->os.loader->path) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("Loader path is not permitted with firmware 
attribute"));
+return -1;
+}
+
+if (def->os.loader->type) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("Loader type is not permitted with firmware 
attribute"));
+return -1;
+}
+
+if (def->os.loader->readonly) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("Loader read-only attribute is not permitted with 
firmware attribute"));
+return -1;
+}
+
+if (def->os.loader->nvramTemplate) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("NVRAM template path is not permitted with 
firmware attribute"));
+return -1;
+}
+
 if (def->os.loader->nvram) {
 virReportError(V

[libvirt PATCH v2 2/5] tests: don't permit NVRAM path when using firmware auto-select

2022-02-22 Thread Daniel P . Berrangé
When using firmware auto-selection we still parse the <nvram> path,
but completely ignore it, replacing any user-provided content with
a custom generated path. This makes sense since, when undefining the
guest, the code to clean up NVRAM also uses the same generated path.

Instead of silently ignoring user config, we should report an
explicit error message. This shows that some of our tests had the
bogus config scenario present.

Signed-off-by: Daniel P. Berrangé 
---
 src/conf/domain_validate.c|  8 +++
 tests/qemuxml2argvdata/os-firmware-bios.xml   |  1 -
 ...mware-efi-bad-nvram-path.x86_64-latest.err |  1 +
 .../os-firmware-efi-bad-nvram-path.xml| 68 +++
 .../os-firmware-efi-secboot.xml   |  1 -
 tests/qemuxml2argvdata/os-firmware-efi.xml|  1 -
 tests/qemuxml2argvtest.c  |  1 +
 .../os-firmware-bios.x86_64-latest.xml|  1 -
 .../os-firmware-efi-secboot.x86_64-latest.xml |  1 -
 .../os-firmware-efi.x86_64-latest.xml |  1 -
 10 files changed, 78 insertions(+), 6 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.xml

diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
index f0b8aa2655..22bfb3b59d 100644
--- a/src/conf/domain_validate.c
+++ b/src/conf/domain_validate.c
@@ -1504,6 +1504,14 @@ virDomainDefOSValidate(const virDomainDef *def,
 return -1;
 }
 
+if (def->os.firmware != VIR_DOMAIN_OS_DEF_FIRMWARE_NONE) {
+if (def->os.loader->nvram) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("NVRAM path is not permitted with firmware 
attribute"));
+return -1;
+}
+}
+
 return 0;
 }
 
diff --git a/tests/qemuxml2argvdata/os-firmware-bios.xml 
b/tests/qemuxml2argvdata/os-firmware-bios.xml
index 6388dd..d89fcb6c58 100644
--- a/tests/qemuxml2argvdata/os-firmware-bios.xml
+++ b/tests/qemuxml2argvdata/os-firmware-bios.xml
@@ -7,7 +7,6 @@
   
 hvm
 
-/var/lib/libvirt/qemu/nvram/fedora_VARS.fd
 
 
   
diff --git 
a/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.x86_64-latest.err 
b/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.x86_64-latest.err
new file mode 100644
index 00..2ba8135ad4
--- /dev/null
+++ b/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.x86_64-latest.err
@@ -0,0 +1 @@
+XML error: NVRAM path is not permitted with firmware attribute
diff --git a/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.xml 
b/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.xml
new file mode 100644
index 00..a4afdb6d0b
--- /dev/null
+++ b/tests/qemuxml2argvdata/os-firmware-efi-bad-nvram-path.xml
@@ -0,0 +1,68 @@
+
+  fedora
+  63840878-0deb-4095-97e6-fc444d9bc9fa
+  8192
+  8192
+  1
+  
+hvm
+
+/some/path
+
+
+  
+  
+
+
+
+  
+  
+  destroy
+  restart
+  restart
+  
+
+
+  
+  
+/usr/bin/qemu-system-x86_64
+
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
+
+
+
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+
+
+  
+
+  
+
diff --git a/tests/qemuxml2argvdata/os-firmware-efi-secboot.xml 
b/tests/qemuxml2argvdata/os-firmware-efi-secboot.xml
index a285e06334..51faac54bf 100644
--- a/tests/qemuxml2argvdata/os-firmware-efi-secboot.xml
+++ b/tests/qemuxml2argvdata/os-firmware-efi-secboot.xml
@@ -7,7 +7,6 @@
   
 hvm
 
-/var/lib/libvirt/qemu/nvram/fedora_VARS.fd
 
 
   
diff --git a/tests/qemuxml2argvdata/os-firmware-efi.xml 
b/tests/qemuxml2argvdata/os-firmware-efi.xml
index 46a7b1b780..cb21437ed8 100644
--- a/tests/qemuxml2argvdata/os-firmware-efi.xml
+++ b/tests/qemuxml2argvdata/os-firmware-efi.xml
@@ -7,7 +7,6 @@
   
 hvm
 
-/var/lib/libvirt/qemu/nvram/fedora_VARS.fd
 
 
   
diff --git a/tests/qemuxml2argvtest.c b/tests/qemuxml2argvtest.c
index d2a53d35a8..693566f2d4 100644
--- a/tests/qemuxml2argvtest.c
+++ b/tests/qemuxml2argvtest.c
@@ -3406,6 +3406,7 @@ mymain(void)
 
 DO_TEST_CAPS_LATEST("os-firmware-bios");
 DO_TEST_CAPS_LATEST("os-firmware-efi");
+DO_TEST_CAPS_LATEST_PARSE_ERROR("os-firmware-efi-bad-nvram-path");
 DO_TEST_CAPS_LATEST("os-firmware-efi-secboot");
 DO_TEST_CAPS_LATEST("os-firmware-efi-no-enrolled-keys");
 DO_TEST_CAPS_ARCH_LATEST("aarch64-os-firmware-efi", "aarch64");
diff --git a/tests/qemuxml2xmloutdata/os-firmware-bios.x86_64-latest.xml 
b/tests/qemuxml2xmloutdata/os-firmware-bios.x86_64-latest.xml
index df6f61421a..a278ff059c 100644
--- a/tests/qemuxml2xmloutdata/os-firmware-bios.x86_64-latest.xml
+++ b/tests/qemuxml2xmloutdata/os-firmware-bios.x86_64-latest.xml
@@ -7,7 +7,6 @@
   
 hvm
 
-/var/lib/libvirt/qemu/nvram/fedora_VARS.fd
 
 
   
diff --git a/tests/qemuxml2xmloutda

[libvirt PATCH v2 1/5] qemu: fix populating NVRAM vars from template with R/W loader

2022-02-22 Thread Daniel P . Berrangé
The QEMU driver will populate the NVRAM file from the template any time it
sees both the template and NVRAM paths present. It will auto-generate an
NVRAM path per VM if one is not provided by the user, but only if the loader
is marked R/O.

So with a R/O loader we have these possible scenarios

  - No NVRAM path or template -> try to infer a template based on the
 loader path; if not possible, fatal
 error. Auto-generate NVRAM path per VM
  - NVRAM path only -> try to infer a template based on the loader path,
   if not possible, app must have pre-created NVRAM
  - NVRAM path + template -> QEMU driver will copy template to NVRAM
  - NVRAM template only -> auto-generate NVRAM path per VM and then
   copy template

While with a R/W loader we have these possible scenarios

  - No NVRAM path or template -> do nothing
  - NVRAM path only -> app must have pre-created NVRAM
  - NVRAM path + template -> QEMU driver will copy template to NVRAM
  - NVRAM template only -> silently ignored

This change fixes the last scenario by making us auto-generate an
NVRAM path per VM, and then copy the template. We can't infer a
template based on the loader path, but we align in other key areas.

Signed-off-by: Daniel P. Berrangé 
---
 src/qemu/qemu_domain.c|  6 ++-
 ...-nvram-rw-template-vars.x86_64-latest.args | 41 +++
 .../bios-nvram-rw-template-vars.xml   | 36 
 .../bios-nvram-rw-template.x86_64-latest.args | 41 +++
 .../bios-nvram-rw-template.xml| 36 
 .../bios-nvram-rw-vars.x86_64-latest.args | 41 +++
 tests/qemuxml2argvdata/bios-nvram-rw-vars.xml | 36 
 tests/qemuxml2argvtest.c  |  3 ++
 8 files changed, 238 insertions(+), 2 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/bios-nvram-rw-template-vars.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-template-vars.xml
 create mode 100644 
tests/qemuxml2argvdata/bios-nvram-rw-template.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-template.xml
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-vars.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/bios-nvram-rw-vars.xml

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index acc76c1cd6..14a558a7ac 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -4451,8 +4451,10 @@ qemuDomainDefPostParse(virDomainDef *def,
 def->os.machine = g_strdup(machine);
 }
 
-if (virDomainDefHasOldStyleROUEFI(def) &&
-!def->os.loader->nvram)
+if (virDomainDefHasOldStyleUEFI(def) &&
+!def->os.loader->nvram &&
+(def->os.loader->readonly == VIR_TRISTATE_BOOL_YES ||
+ def->os.loader->nvramTemplate))
 qemuDomainNVRAMPathFormat(cfg, def, &def->os.loader->nvram);
 
 if (qemuDomainDefAddDefaultDevices(driver, def, qemuCaps) < 0)
diff --git 
a/tests/qemuxml2argvdata/bios-nvram-rw-template-vars.x86_64-latest.args 
b/tests/qemuxml2argvdata/bios-nvram-rw-template-vars.x86_64-latest.args
new file mode 100644
index 00..8d971ec29b
--- /dev/null
+++ b/tests/qemuxml2argvdata/bios-nvram-rw-template-vars.x86_64-latest.args
@@ -0,0 +1,41 @@
+LC_ALL=C \
+PATH=/bin \
+HOME=/tmp/lib/domain--1-test-bios \
+USER=test \
+LOGNAME=test \
+XDG_DATA_HOME=/tmp/lib/domain--1-test-bios/.local/share \
+XDG_CACHE_HOME=/tmp/lib/domain--1-test-bios/.cache \
+XDG_CONFIG_HOME=/tmp/lib/domain--1-test-bios/.config \
+/usr/bin/qemu-system-x86_64 \
+-name guest=test-bios,debug-threads=on \
+-S \
+-object 
'{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/tmp/lib/domain--1-test-bios/master-key.aes"}'
 \
+-blockdev 
'{"driver":"file","filename":"/var/lib/libvirt/qemu/nvram/test-bios.fd","node-name":"libvirt-pflash0-storage","auto-read-only":true,"discard":"unmap"}'
 \
+-blockdev 
'{"node-name":"libvirt-pflash0-format","read-only":false,"driver":"raw","file":"libvirt-pflash0-storage"}'
 \
+-blockdev 
'{"driver":"file","filename":"/some/vars/path.fd","node-name":"libvirt-pflash1-storage","auto-read-only":true,"discard":"unmap"}'
 \
+-blockdev 
'{"node-name":"libvirt-pflash1-format","read-only":false,"driver":"raw","file":"libvirt-pflash1-storage"}'
 \
+-machine 
pc,usb=off,dump-guest-core=off,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format,memory-backend=pc.ram
 \
+-accel tcg \
+-cpu qemu64 \
+-m 1024 \
+-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":1073741824}' \
+-overcommit mem-lock=off \
+-smp 1,sockets=1,cores=1,threads=1 \
+-uuid 362d1fc1-df7d-193e-5c18-49a71bd1da66 \
+-display none \
+-no-user-config \
+-nodefaults \
+-chardev socket,id=charmonitor,fd=1729,server=on,wait=off \
+-mon chardev=charmonitor,id=monitor,mode=control \
+-rtc base=utc \
+-no-shutdown \
+-boot menu=on,strict=on \
+-device 
'{"drive

[PATCH] qemu: blockjob: Avoid spurious log errors when cancelling a shallow copy with reused images

2022-02-22 Thread Peter Krempa
In case when a user starts a block copy operation with
VIR_DOMAIN_BLOCK_COPY_SHALLOW and VIR_DOMAIN_BLOCK_COPY_REUSE_EXT and
both the reused image and the original disk have a backing image libvirt
specifically does not insert the backing image until after the job is
asked to be completed via virBlockJobAbort with
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT.

This is so that management applications can copy the backing image on
the background.

Now when a user aborts the block job instead of pivoting it, we'd
ignore the fact that we didn't insert the backing image yet, and the
cancellation would result in a 'blockdev-del' of an invalid node name
and thus an 'error' severity entry in the log.

To solve this issue we use the same conditions under which the backing
image addition was avoided, and remove the internal state for it prior
to the call that unplugs the mirror destination.

Reported-by: Kashyap Chamarthy 
Signed-off-by: Peter Krempa 
---
 src/qemu/qemu_blockjob.c | 17 +
 1 file changed, 17 insertions(+)

diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 726df95067..2442865b9b 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -1406,6 +1406,8 @@ qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver 
*driver,
qemuBlockJobData *job,
qemuDomainAsyncJob asyncJob)
 {
+qemuDomainObjPrivate *priv = vm->privateData;
+
 VIR_DEBUG("copy job '%s' on VM '%s' aborted", job->name, vm->def->name);

 /* mirror may be NULL for copy job corresponding to migration */
@@ -1413,6 +1415,21 @@ qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver 
*driver,
 !job->disk->mirror)
 return;

+if (!job->jobflagsmissing) {
+bool shallow = job->jobflags & VIR_DOMAIN_BLOCK_COPY_SHALLOW;
+bool reuse = job->jobflags & VIR_DOMAIN_BLOCK_COPY_REUSE_EXT;
+
+/* In the special case of a shallow copy with reused image we don't
+ * hotplug the full chain when 
QEMU_CAPS_BLOCKDEV_SNAPSHOT_ALLOW_WRITE_ONLY
+ * is supported. Attempting to delete it would thus result in spurious
+ * errors as we'd attempt to blockdev-del images which were not added
+ * yet */
+if (reuse && shallow &&
+virQEMUCapsGet(priv->qemuCaps, 
QEMU_CAPS_BLOCKDEV_SNAPSHOT_ALLOW_WRITE_ONLY) &&
+virStorageSourceHasBacking(job->disk->mirror))
+g_clear_pointer(&job->disk->mirror->backingStore, virObjectUnref);
+}
+
 /* activeWrite bitmap is removed automatically here */
 qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, 
job->disk->mirror);
 g_clear_pointer(&job->disk->mirror, virObjectUnref);
-- 
2.35.1



Re: [PATCH v2 20/29] domain_conf: format pnv-phb3-root-port empty addr

2022-02-22 Thread Daniel Henrique Barboza




On 2/21/22 10:36, Ján Tomko wrote:

On a Tuesday in 2022, Daniel Henrique Barboza wrote:

Hiding the empty (0000:00:00.0) PCI address in the case of devices that
will connect to slot 0x0 can be counterintuitive to the user, who will
see something like this:

   
   
 
 
   

Even if the user deliberately adds the root-port <address> element:

   

We end up removing the <address> element after saving the domain file.
This happens because virPCIDeviceAddressIsEmpty() can't distinguish
between a zeroed address that was set by the user versus an address that
wasn't filled at all.


I'm afraid this is incomplete.

virPCIDeviceAddressIsEmpty is for example used to figure out whether the
device needs an address assigned too.

So we'll need an extra field to distinguish between empty and
zero addresses, which will probably need adjustment in a lot of callers
:(
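
As a rough illustration of that suggestion, the distinction could be
carried by an extra field on virPCIDeviceAddress. This is a minimal
sketch; the 'explicitlySet' field and its use below are hypothetical,
not code from this series:

struct _virPCIDeviceAddress {
    unsigned int domain;
    unsigned int bus;
    unsigned int slot;
    unsigned int function;
    /* ... existing fields elided ... */
    bool explicitlySet; /* hypothetical: set by the XML parser whenever
                           the user provided an <address> element */
};

bool
virPCIDeviceAddressIsEmpty(const virPCIDeviceAddress *addr)
{
    /* An all-zero address explicitly written by the user no longer
     * counts as "empty", so it survives formatting and is not
     * treated as "needs an address assigned". */
    return !addr->explicitlySet &&
           !(addr->domain || addr->bus || addr->slot || addr->function);
}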


Hmmm I don't think this is a hill I want to die on in this series.

This patch is already adding a "VIR_DOMAIN_DEF_FORMAT_EMPTY_PCI_ADDR" flag to
force the address formatting regardless of virPCIDeviceAddressIsEmpty(). I
believe that I can rename it to "VIR_DOMAIN_DEF_FORMAT_PCI_ADDR", making it
clear that the idea is to force the <address> formatting at all times for the
device, regardless of any assumptions about the address being empty, or not
assigned, or any other cases where virPCIDeviceAddressIsEmpty() might apply.


Thanks,


Daniel






Jano



Given that all root-ports of PowerNV domains will connect to slot 0 of
the PHB, if the PHB controller has index = 0 this scenario will occur
every time. This patch aims to alleviate this behavior by adding a new
virDomainDefFormatFlags that will allow an empty address to be formatted
in the XML. This flag is then used only when formatting PowerNV root
ports.

Signed-off-by: Daniel Henrique Barboza 
---
src/conf/domain_conf.c | 25 -
src/conf/domain_conf.h |  2 ++
2 files changed, 26 insertions(+), 1 deletion(-)





Re: [PATCH] qemu: Don't ignore failure when building default memory backend

2022-02-22 Thread Michal Prívozník
On 2/22/22 17:21, Ján Tomko wrote:
> On a Tuesday in 2022, Michal Privoznik wrote:
>> When building the default memory backend (which has id='pc.ram')
>> and no guest NUMA is configured then
>> qemuBuildMemCommandLineMemoryDefaultBackend() is called. However,
>> its return value is ignored which means that on invalid
>> configuration (e.g. when non-existent hugepage size was
>> requested) an error is reported into the logs but QEMU is started
>> anyway. And while QEMU does error out its error message doesn't
>> give much clue what's going on:
>>
>>  qemu-system-x86_64: Memory backend 'pc.ram' not found
>>
>> While at it, introduce a test case. While I could choose a nice
>> looking value (e.g. 4MiB) that's exactly what I wanted to avoid,
>> because while such a value might not be possible on x86_64 it may
>> be possible on other arches (e.g. ppc is notoriously known for
>> supporting a wide range of HP sizes). Let's stick with the obviously
>> wrong value of 5MiB.
>>
>> Reported-by: Charles Polisher 
>> Signed-off-by: Michal Privoznik 
>> ---
>> src/qemu/qemu_command.c   |  5 ++--
>> .../hugepages-default-5M.x86_64-latest.err    |  1 +
>> .../qemuxml2argvdata/hugepages-default-5M.xml | 27 +++
>> tests/qemuxml2argvtest.c  |  1 +
>> 4 files changed, 32 insertions(+), 2 deletions(-)
>> create mode 100644
>> tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err
>> create mode 100644 tests/qemuxml2argvdata/hugepages-default-5M.xml
>>
> 
> Reviewed-by: Ján Tomko 
> 

Thanks, pushed now.

Michal



Re: [PATCH] qemu: Don't ignore failure when building default memory backend

2022-02-22 Thread Ján Tomko

On a Tuesday in 2022, Michal Privoznik wrote:

When building the default memory backend (which has id='pc.ram')
and no guest NUMA is configured then
qemuBuildMemCommandLineMemoryDefaultBackend() is called. However,
its return value is ignored which means that on invalid
configuration (e.g. when non-existent hugepage size was
requested) an error is reported into the logs but QEMU is started
anyway. And while QEMU does error out its error message doesn't
give much clue what's going on:

 qemu-system-x86_64: Memory backend 'pc.ram' not found

While at it, introduce a test case. While I could choose a nice
looking value (e.g. 4MiB) that's exactly what I wanted to avoid,
because while such a value might not be possible on x86_64 it may
be possible on other arches (e.g. ppc is notoriously known for
supporting a wide range of HP sizes). Let's stick with the obviously
wrong value of 5MiB.

Reported-by: Charles Polisher 
Signed-off-by: Michal Privoznik 
---
src/qemu/qemu_command.c   |  5 ++--
.../hugepages-default-5M.x86_64-latest.err|  1 +
.../qemuxml2argvdata/hugepages-default-5M.xml | 27 +++
tests/qemuxml2argvtest.c  |  1 +
4 files changed, 32 insertions(+), 2 deletions(-)
create mode 100644 tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err
create mode 100644 tests/qemuxml2argvdata/hugepages-default-5M.xml



Reviewed-by: Ján Tomko 

Jano




Re: [PATCH] NEWS: Mention chardev hot(un)plug fixes, '-sock' removal and RPM storage driver fix

2022-02-22 Thread Ján Tomko

On a Tuesday in 2022, Peter Krempa wrote:

Signed-off-by: Peter Krempa 
---
NEWS.rst | 19 +++
1 file changed, 19 insertions(+)



Reviewed-by: Ján Tomko 

Jano




GSoC 2022 CI project idea proposal

2022-02-22 Thread Erik Skultety
So, we're offloading as much CI configuration/workflow stuff to lcitool
as possible. We can generate config files, install/update machines
(local or remote), dump Dockerfiles... but we can't build and run those
containers locally. Instead, we have a CI helper script in the libvirt repo
which essentially just wraps a Makefile that pulls a GitLab container for
you and:
- gives you a shell, or
- runs the build and tests

I'm not sure how many people actually know we have that helper script, let
alone use it. I've been playing with the idea that we could integrate what's
done in the Makefile into lcitool utilizing either the podman library [1] or
the docker
library [2]. Apart from consolidating all CI services-related efforts to
lcitool the other benefit would be that we could gain the ability to run and
debug in a project-specific container also in other libvirt projects not just
main libvirt.

So, I thought this could be a nice project for GSoC. Ideas?

Unless there are arguments against this idea, I'd eventually add it
to our GitLab GSoC issue tracker.

[1] https://github.com/containers/podman-py
[2] https://docker-py.readthedocs.io/en/stable/

Erik



Re: [PATCH 0/3] Unbreak MIPS Malta

2022-02-22 Thread Cole Robinson
On 2/2/22 4:09 AM, Michal Prívozník wrote:
> On 2/1/22 15:33, Lubomir Rintel wrote:
>> My day started like this:
>>
>>   # virt-install --connect qemu:///system --arch mips --machine malta 
>> --memory 256 --disk none --import
>>   Using default --name vm-mips
>>   
>>   Starting install...
>>   ERROR    XML error: No PCI buses available
>>
>> Needless to say, it ended up completely ruined.
>>
>> Chained to this message are the patches I've created in an attempt to
>> remedy the highly unfortunate situation, with hope that they'll be
>> treated with warmth, understanding and perhaps even applied to the
>> libvirt tree.
>>

Hi Lubo, after these patches was the VM actually usable? Did it simply
start up?

There is effectively no qemu-system-mips* coverage in the virt-install
or libvirt unit test suites; we should add some if we don't want this to
regress. If the XML virt-install generates for you is useful, we could
drop it into libvirt's qemuxml2argvtest.

Thanks,
Cole



[PATCH] NEWS: Mention chardev hot(un)plug fixes, '-sock' removal and RPM storage driver fix

2022-02-22 Thread Peter Krempa
Signed-off-by: Peter Krempa 
---
 NEWS.rst | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/NEWS.rst b/NEWS.rst
index b684416909..169ac9b740 100644
--- a/NEWS.rst
+++ b/NEWS.rst
@@ -64,6 +64,25 @@ v8.1.0 (unreleased)

 * **Bug fixes**

+  * Remove unix sockets from filesystem when disabling a '.socket' systemd unit
+
+The presence of the socket files is used by our remote driver to determine
+which service to access. Since neither systemd nor the daemons clean up the
+socket file, clients were running into problems when a modular deployment was
+switched to monolithic ``libvirtd``.
+
+  * qemu: Fixes of fd passing during hotplug and hotunplug of chardevs
+
+FDs used as chardev backing are now properly removed when hot-unplugging
+a chardev from qemu and hotplugged chardevs now properly use ``virtlogd``
+to handle the input and output from qemu.
+
+  * RPM: Run pre/post-install steps on ``daemon-driver-storage-core``
+
+Previously the pre/post-install code was part of the meta-package which
+installed all storage driver sub-packages thus a minimalistic install
+of the storage driver didn't behave correctly.
+

 v8.0.0 (2022-01-14)
 ===
-- 
2.35.1



Re: [libvirt PATCH 4/4] gitlab-ci: Introduce a new test 'integration' pipeline stage

2022-02-22 Thread Daniel P . Berrangé
On Thu, Feb 17, 2022 at 09:54:12AM -0800, Andrea Bolognani wrote:
> On Mon, Jan 31, 2022 at 07:01:01PM +0100, Erik Skultety wrote:
> > +++ b/.gitlab-ci-integration.yml
> > @@ -0,0 +1,116 @@
> > +.tests:
> > +  stage: integration
> > +  before_script:
> > +- mkdir "$SCRATCH_DIR"
> > +- sudo dnf install -y libvirt-rpms/* libvirt-perl-rpms/*
> > +- sudo pip3 install --prefix=/usr avocado-framework
> > +- source /etc/os-release  # in order to query the vendor-provided 
> > variables
> > +- if test "$ID" == "centos" && test "$VERSION_ID" -lt 9 ||
> > + test "$ID" == "fedora" && test "$VERSION_ID" -lt 35;
> 
> Using == with test is a bashism, please stick to the portable version
> even though it's very likely that the script will ultimately run
> under bash.
> 
> > +- for daemon in $DAEMONS;
> > +  do
> > +sudo sed -Ei 
> > "s/^(#)?(log_outputs=).*/\2'1:file:\/var\/log\/libvirt\/${daemon}.log'/" 
> > /etc/libvirt/${daemon}.conf;
> > +sudo sed -Ei "s/^(#)?(log_filters=).*/\2'4:*object* 4:*json* 
> > 4:*event* 4:*rpc* 4:daemon.remote 4:util.threadjob 4:*access* 1:*'/" 
> > /etc/libvirt/${daemon}.conf;
> > +sudo systemctl --quiet stop ${daemon}.service;
> > +sudo systemctl restart ${daemon}.socket;
> > +  done
> 
> I suggest changing this to something along the lines of
> 
>   - for daemon in $DAEMONS;
> do
>   log_outputs="file:/var/log/libvirt/${daemon}.log"
>   log_filters="3:remote 4:event 3:util.json 3:util.object
> 3:util.dbus 3:util.netlink 3:node_device 3:rpc 3:access 1:*"
>   sed -Ei -e 
> "s;^#*\\s*log_outputs\\s*=.*$;log_outputs=\"$log_outputs\";g" \
>   -e 
> "s;^#*\\s*log_filters\\s*=.*$;log_filters=\"$log_filters\";g" \
>   "src/remote/${daemon}.conf.in"
>   # ...
> done

I'd suggest simply not using sed at all

   - for daemon in $DAEMONS;
 do
   log_outputs="file:/var/log/libvirt/${daemon}.log"
   log_filters="3:remote 4:event 3:util.json 3:util.object 3:util.dbus 
3:util.netlink 3:node_device 3:rpc 3:access 1:*"
   augtool set /files/etc/libvirt/${daemon}.conf/log_filters "$log_filters"
   augtool set /files/etc/libvirt/${daemon}.conf/log_outputs "$log_outputs"
 done



Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|



Re: [libvirt PATCH 09/11] storage: Statically initialize mutex

2022-02-22 Thread Daniel P . Berrangé
On Tue, Feb 22, 2022 at 03:10:52PM +0100, Tim Wiederhake wrote:
> On Thu, 2022-02-17 at 13:15 +, Daniel P. Berrangé wrote:
> > On Thu, Feb 17, 2022 at 02:01:01PM +0100, Tim Wiederhake wrote:
> > 
> > Really should be giving an explanation of why this change
> > is required.
> > 
> Added locally to commit message:
> 
>   Making the mutex static and independent of the lifetime of the
>   driver object allows for simplification of mutex handling during
>   the object's initialization and cleanup using VIR_LOCK_GUARD's
>   unlock-on-scope-exit behavior.

Note, the locking during init/cleanup is pointless because we
rely on serialization and a mere mutex doesn't give us safety
in cleanup, as any concurrent thread will be accessing a freed
driver anyway. I guess that's a pre-existing issue though.


Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|
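
For reference, the VIR_LOCK_GUARD pattern the commit message above
refers to looks roughly like this; a minimal sketch assuming libvirt's
virthread.h helpers, with an illustrative function body not taken from
the series:

static virMutex mutex = VIR_MUTEX_INITIALIZER;

static int
storageStateDoSomething(void)
{
    VIR_LOCK_GUARD lock = virLockGuardLock(&mutex);

    /* Critical section: every return path releases 'mutex'
     * automatically when 'lock' goes out of scope, so no explicit
     * unlock calls (or cleanup-time virMutexDestroy) are needed. */
    return 0;
}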



Re: [libvirt PATCH 4/4] gitlab-ci: Introduce a new test 'integration' pipeline stage

2022-02-22 Thread Erik Skultety
...

> 
> > +- for daemon in $DAEMONS;
> > +  do
> > +sudo sed -Ei 
> > "s/^(#)?(log_outputs=).*/\2'1:file:\/var\/log\/libvirt\/${daemon}.log'/" 
> > /etc/libvirt/${daemon}.conf;
> > +sudo sed -Ei "s/^(#)?(log_filters=).*/\2'4:*object* 4:*json* 
> > 4:*event* 4:*rpc* 4:daemon.remote 4:util.threadjob 4:*access* 1:*'/" 
> > /etc/libvirt/${daemon}.conf;
> > +sudo systemctl --quiet stop ${daemon}.service;
> > +sudo systemctl restart ${daemon}.socket;
> > +  done
> 
> I suggest changing this to something along the lines of
> 
>   - for daemon in $DAEMONS;
> do
>   log_outputs="file:/var/log/libvirt/${daemon}.log"
>   log_filters="3:remote 4:event 3:util.json 3:util.object
> 3:util.dbus 3:util.netlink 3:node_device 3:rpc 3:access 1:*"
>   sed -Ei -e 
> "s;^#*\\s*log_outputs\\s*=.*$;log_outputs=\"$log_outputs\";g" \
>   -e 
> "s;^#*\\s*log_filters\\s*=.*$;log_filters=\"$log_filters\";g" \
>   "src/remote/${daemon}.conf.in"
>   # ...
> done
> 
> Advantages:
> 
>   * saves one call to sed;
>   * doesn't need the awkward quoting for slashes in paths;
>   * produces key="value" which is consistent with the existing
> contents of the configuration files (even though the parser will
> accept single quotes too);
>   * handles slight, semantically-irrelevant changes to the contents
> of the configuration files such as whitespace being added;
>   * doesn't unnecessarily use captures;
>   * avoids excessively long lines.

Yes, ^this looks better.

> 
> Note that I have adjusted the value of log_filters to match what is
> recommended in
> 
>   https://libvirt.org/kbase/debuglogs.html#less-verbose-logging-for-qemu-vms
> 
> but maybe there's a reason you had picked a different set of filters.

I didn't even know we document this, so I always use the filters I empirically
settled with. Given the general feeling about warnings usefulness in libvirt
logs I either use 1 for debug logs or 4 for errors.

If you feel strongly that I should use what we recommend on that page, I'll
go with that, but I'll add '4:util.threadjob' as well, as threadjobs are also
verbose and don't add any value.

...
> 
> Also note that I haven't actually tested that the above works
> correctly O:-)

It's a good starting point, I'll handle the rest.

...

> > +libvirt-perl-bindings:
> > +  stage: bindings
> > +  trigger:
> > +project: eskultety/libvirt-perl
> > +branch: multi-project-ci
> > +strategy: depend
> > +
> > +
> > +centos-stream-8-tests:
> > +  extends: .tests
> > +  needs:
> > +- libvirt-perl-bindings
> > +- pipeline: $PARENT_PIPELINE_ID
> > +  job: x86_64-centos-stream-8
> > +- project: eskultety/libvirt-perl
> > +  job: x86_64-centos-stream-8
> > +  ref: multi-project-ci
> > +  artifacts: true
> 
> IIUC from the documentation and from reading around, using
> strategy:depend will cause the local job to reflect the status of the
> triggered pipeline. So far so good.
> 
> What I am unclear about is, is there any guarantee that using
> artifact:true with a job from an external project's pipeline will
> expose the artifacts from the job that was executed as part of the
> specific pipeline that we've just triggered ourselves as opposed to
> some other pipeline that might have already been completed in the
past or might have completed in the meantime?

Not just by using artifact:true or strategy:depend. The important bit is having
'libvirt-perl-bindings' in the job's needs list. Let me explain, if you don't
put the bindings trigger job to the requirements list of another job
(centos-stream-8-tests in this case) what will happen is that the trigger job
will be waiting for the external pipeline to finish, but centos-stream-8-tests
job would execute basically as soon as the container project builds are
finished because artifacts:true would download the latest RPM artifacts from an
earlier build...

> 
> Taking a step back, why exactly are we triggering a rebuild of
> libvirt-perl in the first place? Once we change that project's
> pipeline so that RPMs are published as artifacts, can't we just grab
> the ones from the latest successful pipeline? Maybe you've already
> explained why you did things this way and I just missed it!

...which brings us here. Well, I adopted the mantra that all libvirt-friends
projects depend on libvirt and given that we need libvirt-perl bindings to test
upstream, I'd like to always have the latest bindings available to test with
the current upstream build. The other reason why I did the way you commented on
is that during development of the proposal many times I had to make changes to
both libvirt and libvirt-perl in lockstep and it was tremendously frustrating
to wait for the pipeline to get to the integration stage only to realize that
the integration job didn't wait for the latest bindings and instead picked up
the previous latest artifacts which I knew were either faulty or didn't cont

[libvirt PATCH][merged][trivial] Fix typo in NEWS

2022-02-22 Thread Tim Wiederhake
Signed-off-by: Tim Wiederhake 
---
 NEWS.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/NEWS.rst b/NEWS.rst
index b684416909..cc5666fa91 100644
--- a/NEWS.rst
+++ b/NEWS.rst
@@ -32,7 +32,7 @@ v8.1.0 (unreleased)
 either of the following 3 options:
 ``page-sampling, dirty-bitmap, dirty-ring``.
 
-Add ``calc_mode`` field for dirtyrate statistics retured by
+Add ``calc_mode`` field for dirtyrate statistics returned by
 ``virsh domstats --dirtyrate``, also add ``vCPU dirtyrate`` if
 ``dirty-ring`` mode was used in last measurement.
 
-- 
2.31.1



Re: [libvirt PATCH 09/11] storage: Statically initialize mutex

2022-02-22 Thread Tim Wiederhake
On Thu, 2022-02-17 at 13:15 +, Daniel P. Berrangé wrote:
> On Thu, Feb 17, 2022 at 02:01:01PM +0100, Tim Wiederhake wrote:
> 
> Really should be giving an explanation of why this change
> is required.
> 
Added locally to commit message:

  Making the mutex static and independent of the lifetime of the
  driver object allows for simplification of mutex handling during
  the object's initialization and cleanup using VIR_LOCK_GUARD's
  unlock-on-scope-exit behavior.
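
For reference, the resulting pattern is roughly the following (a minimal
sketch; the function name is made up, the helpers are from virthread.h):

  static virMutex mutex = VIR_MUTEX_INITIALIZER;

  static void
  storageDriverDoSomething(void)
  {
      /* unlocked automatically when 'lock' goes out of scope,
       * so early returns need no explicit unlock */
      VIR_LOCK_GUARD lock = virLockGuardLock(&mutex);

      /* ... critical section ... */
  }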

Regards,
Tim
> > Signed-off-by: Tim Wiederhake 
> > ---
> >  src/conf/virstorageobj.h |  2 --
> >  src/storage/storage_driver.c | 11 ---
> >  2 files changed, 4 insertions(+), 9 deletions(-)
> > 
> > diff --git a/src/conf/virstorageobj.h b/src/conf/virstorageobj.h
> > index 523bdec244..ad6005f153 100644
> > --- a/src/conf/virstorageobj.h
> > +++ b/src/conf/virstorageobj.h
> > @@ -31,8 +31,6 @@ typedef struct _virStoragePoolObjList virStoragePoolObjList;
> >  
> >  typedef struct _virStorageDriverState virStorageDriverState;
> >  struct _virStorageDriverState {
> > -    virMutex lock;
> > -
> >  /* pid file FD, ensures two copies of the driver can't use the same root */
> >  int lockFD;
> >  
> > diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
> > index 97e0d9b3a0..05675a5539 100644
> > --- a/src/storage/storage_driver.c
> > +++ b/src/storage/storage_driver.c
> > @@ -57,6 +57,8 @@ VIR_LOG_INIT("storage.storage_driver");
> >  
> >  static virStorageDriverState *driver;
> >  
> > +static virMutex mutex = VIR_MUTEX_INITIALIZER;
> > +
> >  static int storageStateCleanup(void);
> >  
> >  typedef struct _virStorageVolStreamInfo virStorageVolStreamInfo;
> > @@ -67,11 +69,11 @@ struct _virStorageVolStreamInfo {
> >  
> >  static void storageDriverLock(void)
> >  {
> > -    virMutexLock(&driver->lock);
> > +    virMutexLock(&mutex);
> >  }
> >  static void storageDriverUnlock(void)
> >  {
> > -    virMutexUnlock(&driver->lock);
> > +    virMutexUnlock(&mutex);
> >  }
> >  
> >  
> > @@ -270,10 +272,6 @@ storageStateInitialize(bool privileged,
> >  driver = g_new0(virStorageDriverState, 1);
> >  
> >  driver->lockFD = -1;
> > -    if (virMutexInit(&driver->lock) < 0) {
> > -    VIR_FREE(driver);
> > -    return VIR_DRV_STATE_INIT_ERROR;
> > -    }
> >  storageDriverLock();
> >  
> >  if (!(driver->pools = virStoragePoolObjListNew()))
> > @@ -392,7 +390,6 @@ storageStateCleanup(void)
> >  VIR_FREE(driver->autostartDir);
> >  VIR_FREE(driver->stateDir);
> >  storageDriverUnlock();
> > -    virMutexDestroy(&driver->lock);
> >  VIR_FREE(driver);
> >  
> >  return 0;
> > -- 
> > 2.31.1
> > 
> 
> Regards,
> Daniel




Re: [PATCH] qemu: Move some enums impl to qemu_monitor.c

2022-02-22 Thread Andrea Bolognani
On Tue, Feb 22, 2022 at 08:38:10AM +0100, Michal Prívozník wrote:
> On 2/21/22 17:17, Andrea Bolognani wrote:
> > On Mon, Feb 21, 2022 at 04:10:25PM +0100, Michal Privoznik wrote:
> >> +VIR_ENUM_IMPL(qemuMonitorDirtyRateCalcMode,
> >> +  QEMU_MONITOR_DIRTYRATE_CALC_MODE_LAST,
> >> +  "page-sampling",
> >> +  "dirty-bitmap",
> >> +  "dirty-ring",
> >> +);
> >
> > Kinda weird that this one ends up in the middle of the file instead
> > of being grouped with the rest. I'd keep them together, unless
> > there's a good reason for doing things this way that I missed.
>
> My idea was to keep it close to the rest of dirty rate related
> functions. But I can move it next to the rest. I don't have strong
> preference.

I don't have a strong preference either. Just push it as is :)

-- 
Andrea Bolognani / Red Hat / Virtualization




Re: [libvirt PATCH v2] Make systemd unit ordering more robust

2022-02-22 Thread Michal Prívozník
On 2/15/22 15:52, Martin Kletzander wrote:
> Since the libvirt-guests script/service can operate on various URIs and we
> support both socket activation and traditional services, the ordering
> should be specified for all the possible sockets and services.
> 
> Also remove the Wants= dependency, since we do not want to start any
> service.  We cannot know which one libvirt-guests is configured to use,
> so we'd have to start all the daemons, which would break if unused
> colliding services are not masked (libvirtd.service in the modular case
> and all the modular daemon service units in the monolithic scenario).
> Fortunately we can assume that the system is configured properly to start
> the services/sockets that are of interest to the user.
> That also works with the setup described in https://libvirt.org/daemons.html .
> 
> To make it even more robust we add the daemon service into the machine units
> created for individual domains as it was missing there.
> 
> https://bugzilla.redhat.com/show_bug.cgi?id=1868537
> 
> Signed-off-by: Martin Kletzander 
> ---
>  src/util/virsystemd.c   |  8 ++--
>  tools/libvirt-guests.service.in | 12 +++-
>  2 files changed, 17 insertions(+), 3 deletions(-)
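
In other words, libvirt-guests.service ends up with ordering stanzas of
roughly this shape (a sketch only; the exact unit list is an assumption,
the patch has the authoritative one):

  [Unit]
  After=libvirtd.service
  After=libvirtd.socket
  After=virtqemud.service
  After=virtqemud.socket

Since After= only orders units and does not pull them in, dropping Wants=
is safe here.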

Reviewed-by: Michal Privoznik 

Michal



Re: [libvirt][PATCH RESEND v10 5/5] qemu: Add command-line to generate SGX EPC memory backend

2022-02-22 Thread Michal Prívozník
On 2/16/22 11:25, Michal Prívozník wrote:

>> diff --git a/tests/qemuxml2argvdata/sgx-epc.x86_64-6.2.0.args b/tests/qemuxml2argvdata/sgx-epc.x86_64-6.2.0.args
>> new file mode 100644
>> index 0000000000..e1aa274054
>> --- /dev/null
>> +++ b/tests/qemuxml2argvdata/sgx-epc.x86_64-6.2.0.args
>> @@ -0,0 +1,38 @@
>> +LC_ALL=C \
>> +PATH=/bin \
>> +HOME=/tmp/lib/domain--1-QEMUGuest1 \
>> +USER=test \
>> +LOGNAME=test \
>> +XDG_DATA_HOME=/tmp/lib/domain--1-QEMUGuest1/.local/share \
>> +XDG_CACHE_HOME=/tmp/lib/domain--1-QEMUGuest1/.cache \
>> +XDG_CONFIG_HOME=/tmp/lib/domain--1-QEMUGuest1/.config \
>> +/usr/bin/qemu-system-x86_64 \
>> +-name guest=QEMUGuest1,debug-threads=on \
>> +-S \
>> +-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/tmp/lib/domain--1-QEMUGuest1/master-key.aes"}' \
>> +-machine pc-q35-6.2,usb=off,dump-guest-core=off,memory-backend=pc.ram \
>> +-accel tcg \
>> +-cpu qemu64 \
>> +-m 134 \
>> +-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":140509184}' \
>> +-overcommit mem-lock=off \
>> +-smp 1,sockets=1,cores=1,threads=1 \
>> +-object '{"qom-type":"memory-backend-epc","id":"memepc0","prealloc":true,"size":67108864}' \
>> +-object '{"qom-type":"memory-backend-epc","id":"memepc1","prealloc":true,"size":16777216}' \
>> +-M sgx-epc.0.memdev=memepc0,sgx-epc.1.memdev=memepc1 \
> 
> I don't think this is correct. IIUC, this can be passed to -machine
> directly, e.g.:
> 
> -machine pc-q35-6.2,usb=off,dump-guest-core=off,memory-backend=pc.ram,sgx-epc.0.memdev=memepc0,sgx-epc.1.memdev=memepc1
> 
> And when I try to do that, I get:
> 
>   qemu-system-x86_64: Parameter 'sgx-epc.0.node' is missing
> 
> Any idea, what's going on? I would much rather avoid using -M if we can
> help it.
> 

One more thing. I've enabled SGX on my machine hoping to test this out,
but my attempts are failing so far. Firstly, with these patches qemu is
unhappy, because .node attribute is missing:

/home/zippy/work/qemu/qemu.git/build/qemu-system-x86_64 \
-machine pc-i440fx-4.0,usb=off,dump-guest-core=off \
-accel kvm \
-cpu host,migratable=on \
-m size=4194304k,slots=16,maxmem=1099511627776k \
-object '{"qom-type":"memory-backend-memfd","id":"memepc0","hugetlb":true,"hugetlbsize":2097152,"prealloc":true,"size":67108864,"host-nodes":[0],"policy":"bind"}' \
-M sgx-epc.0.memdev=memepc0

qemu-system-x86_64: Parameter 'sgx-epc.0.node' is missing


But okay, I can add .node, but that doesn't get me much further:

/home/zippy/work/qemu/qemu.git/build/qemu-system-x86_64 \
-machine pc-i440fx-4.0,usb=off,dump-guest-core=off \
-accel kvm \
-cpu host,migratable=on \
-m size=4194304k,slots=16,maxmem=1099511627776k \
-object '{"qom-type":"memory-backend-memfd","id":"memepc0","hugetlb":true,"hugetlbsize":2097152,"prealloc":true,"size":67108864,"host-nodes":[0],"policy":"bind"}' \
-M sgx-epc.0.memdev=memepc0,sgx-epc.0.node=0

qemu-system-x86_64: Invalid parameter type for 'memdev', expected: memory-backend-epc
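
The error suggests the memdev referenced by sgx-epc.0.memdev has to be a
memory-backend-epc object (as in the test data above), not
memory-backend-memfd. So presumably something along these lines would be
accepted (an untested sketch):

-object '{"qom-type":"memory-backend-epc","id":"memepc0","prealloc":true,"size":67108864}' \
-M sgx-epc.0.memdev=memepc0,sgx-epc.0.node=0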


Michal



[PATCH] qemu: Don't ignore failure when building default memory backend

2022-02-22 Thread Michal Privoznik
When building the default memory backend (which has id='pc.ram')
and no guest NUMA is configured,
qemuBuildMemCommandLineMemoryDefaultBackend() is called. However,
its return value is ignored, which means that on an invalid
configuration (e.g. when a non-existent hugepage size was
requested) an error is reported in the logs but QEMU is started
anyway. And while QEMU does error out, its error message doesn't
give much of a clue about what's going on:

  qemu-system-x86_64: Memory backend 'pc.ram' not found

While at it, introduce a test case. While I could have chosen a
nice-looking value (e.g. 4MiB), that's exactly what I wanted to
avoid, because while such a value might not be possible on x86_64
it may be possible on other arches (e.g. ppc is notoriously known
for supporting a wide range of hugepage sizes). Let's stick with
the obviously wrong value of 5MiB.

Reported-by: Charles Polisher 
Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c   |  5 ++--
 .../hugepages-default-5M.x86_64-latest.err|  1 +
 .../qemuxml2argvdata/hugepages-default-5M.xml | 27 +++
 tests/qemuxml2argvtest.c  |  1 +
 4 files changed, 32 insertions(+), 2 deletions(-)
 create mode 100644 tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err
 create mode 100644 tests/qemuxml2argvdata/hugepages-default-5M.xml

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 2c963a7297..c836799888 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7369,8 +7369,9 @@ qemuBuildMemCommandLine(virCommand *cmd,
  * regular memory because -mem-path and -mem-prealloc are obsolete.
  * However, if domain has one or more NUMA nodes then there is no
  * default RAM and we mustn't generate the memory object. */
-    if (!virDomainNumaGetNodeCount(def->numa))
-        qemuBuildMemCommandLineMemoryDefaultBackend(cmd, def, priv, defaultRAMid);
+    if (!virDomainNumaGetNodeCount(def->numa) &&
+        qemuBuildMemCommandLineMemoryDefaultBackend(cmd, def, priv, defaultRAMid) < 0)
+        return -1;
 } else {
 /*
  * Add '-mem-path' (and '-mem-prealloc') parameter here if
diff --git a/tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err b/tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err
new file mode 100644
index 0000000000..bf5e54c9e4
--- /dev/null
+++ b/tests/qemuxml2argvdata/hugepages-default-5M.x86_64-latest.err
@@ -0,0 +1 @@
+internal error: Unable to find any usable hugetlbfs mount for 5120 KiB
diff --git a/tests/qemuxml2argvdata/hugepages-default-5M.xml b/tests/qemuxml2argvdata/hugepages-default-5M.xml
new file mode 100644
index 0000000000..280ea4bb71
--- /dev/null
+++ b/tests/qemuxml2argvdata/hugepages-default-5M.xml
@@ -0,0 +1,27 @@
+<domain type='qemu'>
+  <name>NonExistentPageSize</name>
+  <uuid>21433e10-aea8-434a-8f81-55781c2e9035</uuid>
+  <memory unit='KiB'>4194304</memory>
+  <currentMemory unit='KiB'>4194304</currentMemory>
+  <memoryBacking>
+    <hugepages>
+      <page size='5120' unit='KiB'/>
+    </hugepages>
+  </memoryBacking>
+  <vcpu placement='static'>2</vcpu>
+  <os>
+    <type arch='x86_64' machine='pc'>hvm</type>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/qemu-system-x86_64</emulator>
+  </devices>
+</domain>
diff --git a/tests/qemuxml2argvtest.c b/tests/qemuxml2argvtest.c
index 9c5c394e03..a32c5a8250 100644
--- a/tests/qemuxml2argvtest.c
+++ b/tests/qemuxml2argvtest.c
@@ -1270,6 +1270,7 @@ mymain(void)
 DO_TEST("hugepages-default", QEMU_CAPS_OBJECT_MEMORY_FILE);
 DO_TEST("hugepages-default-2M", QEMU_CAPS_OBJECT_MEMORY_FILE);
 DO_TEST("hugepages-default-system-size", QEMU_CAPS_OBJECT_MEMORY_FILE);
+DO_TEST_CAPS_LATEST_FAILURE("hugepages-default-5M");
 DO_TEST_PARSE_ERROR_NOCAPS("hugepages-default-1G-nodeset-2M");
 DO_TEST("hugepages-nodeset", QEMU_CAPS_OBJECT_MEMORY_FILE);
 DO_TEST_PARSE_ERROR("hugepages-nodeset-nonexist",
-- 
2.34.1