[PATCH] tests/qemu-iotests: Use GNU sed in two more spots where it is necessary

2022-03-09 Thread Thomas Huth
These two spots have been missed in commit 9086c7639822 ("Rework the
checks and spots using GNU sed") - they need GNU sed, too, since they
are using the "+" address form.

Signed-off-by: Thomas Huth 
---
 tests/qemu-iotests/common.filter | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index 21819db9c3..f6e6b3bd04 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -106,13 +106,13 @@ _filter_hmp()
 # replace block job offset
 _filter_block_job_offset()
 {
-    sed -e 's/, "offset": [0-9]\+,/, "offset": OFFSET,/'
+    gsed -e 's/, "offset": [0-9]\+,/, "offset": OFFSET,/'
 }
 
 # replace block job len
 _filter_block_job_len()
 {
-    sed -e 's/, "len": [0-9]\+,/, "len": LEN,/g'
+    gsed -e 's/, "len": [0-9]\+,/, "len": LEN,/g'
 }
 
 # replace actual image size (depends on the host filesystem)
-- 
2.27.0




Re: [PATCH v7 06/22] hvf: Fix OOB write in RDTSCP instruction decode

2022-03-09 Thread Philippe Mathieu-Daudé

Hi Paolo,

I forgot to Cc you. Could you Ack this patch?

On 7/3/22 00:17, Philippe Mathieu-Daudé wrote:

From: Cameron Esfahani 

A guest could craft a specific stream of instructions that will have QEMU
write 0xF9 to inappropriate locations in memory.  Add additional asserts
to check for this.  Generate a #UD if there are more than 14 prefix bytes.

Found by Julian Stecklina 

Signed-off-by: Cameron Esfahani 
Message-Id: <20220219063831.35356-1-di...@apple.com>
Signed-off-by: Philippe Mathieu-Daudé 
---
  target/i386/hvf/x86_decode.c | 12 ++--
  target/i386/hvf/x86hvf.c |  8 
  target/i386/hvf/x86hvf.h |  1 +
  3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
index 062713b1a4..5d051252b4 100644
--- a/target/i386/hvf/x86_decode.c
+++ b/target/i386/hvf/x86_decode.c
@@ -24,8 +24,10 @@
 #include "vmx.h"
 #include "x86_mmu.h"
 #include "x86_descr.h"
+#include "x86hvf.h"
 
 #define OPCODE_ESCAPE   0xf
+#define X86_MAX_INSN_PREFIX_LENGTH (14)
 
 static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
 {
@@ -541,7 +543,8 @@ static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)
     };
     decode->cmd = group[decode->modrm.reg];
     if (0xf9 == decode->modrm.modrm) {
-        decode->opcode[decode->len++] = decode->modrm.modrm;
+        VM_PANIC_ON(decode->opcode_len >= sizeof(decode->opcode));
+        decode->opcode[decode->opcode_len++] = decode->modrm.modrm;
         decode->cmd = X86_DECODE_CMD_RDTSCP;
     }
 }
@@ -1847,7 +1850,8 @@ void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
 
 static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
 {
-    while (1) {
+    /* At most X86_MAX_INSN_PREFIX_LENGTH prefix bytes. */
+    for (int i = 0; i < X86_MAX_INSN_PREFIX_LENGTH; i++) {
         /*
          * REX prefix must come after legacy prefixes.
          * REX before legacy is ignored.
@@ -1892,6 +1896,8 @@ static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
             return;
         }
     }
+    /* Too many prefixes!  Generate #UD. */
+    hvf_inject_ud(env);
 }
 
 void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
@@ -2090,11 +2096,13 @@ static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
     uint8_t opcode;
 
     opcode = decode_byte(env, decode);
+    VM_PANIC_ON(decode->opcode_len >= sizeof(decode->opcode));
     decode->opcode[decode->opcode_len++] = opcode;
     if (opcode != OPCODE_ESCAPE) {
         decode_opcode_1(env, decode, opcode);
     } else {
         opcode = decode_byte(env, decode);
+        VM_PANIC_ON(decode->opcode_len >= sizeof(decode->opcode));
         decode->opcode[decode->opcode_len++] = opcode;
         decode_opcode_2(env, decode, opcode);
     }
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index bec9fc5814..a338c207b7 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -423,6 +423,14 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
             & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
 }
 
+void hvf_inject_ud(CPUX86State *env)
+{
+    env->exception_nr = EXCP06_ILLOP;
+    env->exception_injected = 1;
+    env->has_error_code = false;
+    env->error_code = 0;
+}
+
 int hvf_process_events(CPUState *cpu_state)
 {
     X86CPU *cpu = X86_CPU(cpu_state);
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
index db6003d6bd..427cdc1c13 100644
--- a/target/i386/hvf/x86hvf.h
+++ b/target/i386/hvf/x86hvf.h
@@ -22,6 +22,7 @@
 
 int hvf_process_events(CPUState *);
 bool hvf_inject_interrupts(CPUState *);
+void hvf_inject_ud(CPUX86State *);
 void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                      SegmentCache *qseg, bool is_tr);
 void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);





Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Philippe Mathieu-Daudé

Hi Alex, Thomas, Daniel,

Could you ack this patch?

On 7/3/22 00:17, Philippe Mathieu-Daudé wrote:

From: Philippe Mathieu-Daudé 

Add support for macOS 12 build on Cirrus-CI, similarly to commit
0e103a65ba1 ("gitlab: support for ... macOS 11 via cirrus-run"),
but with the following differences:
  - Enable modules (configure --enable-modules)
  - Do not run softfloat3 tests (make check-softfloat)
  - Run Aarch64 qtests instead of x86_64 ones

Generate the vars file by calling 'make lcitool-refresh'.

Signed-off-by: Philippe Mathieu-Daudé 
---
  .gitlab-ci.d/cirrus.yml   | 16 
  .gitlab-ci.d/cirrus/macos-12.vars | 16 
  tests/lcitool/refresh |  1 +
  3 files changed, 33 insertions(+)
  create mode 100644 .gitlab-ci.d/cirrus/macos-12.vars

diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml
index b96b22e269..be1dce5d4e 100644
--- a/.gitlab-ci.d/cirrus.yml
+++ b/.gitlab-ci.d/cirrus.yml
@@ -87,6 +87,22 @@ x64-macos-11-base-build:
     PKG_CONFIG_PATH: /usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/ncurses/lib/pkgconfig:/usr/local/opt/readline/lib/pkgconfig
     TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
 
+x64-macos-12-base-build:
+  extends: .cirrus_build_job
+  variables:
+    NAME: macos-12
+    CIRRUS_VM_INSTANCE_TYPE: osx_instance
+    CIRRUS_VM_IMAGE_SELECTOR: image
+    CIRRUS_VM_IMAGE_NAME: monterey-base
+    CIRRUS_VM_CPUS: 12
+    CIRRUS_VM_RAM: 24G
+    UPDATE_COMMAND: brew update
+    INSTALL_COMMAND: brew install
+    PATH_EXTRA: /usr/local/opt/ccache/libexec:/usr/local/opt/gettext/bin
+    PKG_CONFIG_PATH: /usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/ncurses/lib/pkgconfig:/usr/local/opt/readline/lib/pkgconfig
+    CONFIGURE_ARGS: --enable-modules
+    TEST_TARGETS: check-unit check-block check-qapi-schema check-qtest-aarch64
+
 
 # The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job
 .cirrus_kvm_job:
diff --git a/.gitlab-ci.d/cirrus/macos-12.vars b/.gitlab-ci.d/cirrus/macos-12.vars
new file mode 100644
index 0000000000..a793258c64
--- /dev/null
+++ b/.gitlab-ci.d/cirrus/macos-12.vars
@@ -0,0 +1,16 @@
+# THIS FILE WAS AUTO-GENERATED
+#
+#  $ lcitool variables macos-12 qemu
+#
+# https://gitlab.com/libvirt/libvirt-ci
+
+CCACHE='/usr/local/bin/ccache'
+CPAN_PKGS='Test::Harness'
+CROSS_PKGS=''
+MAKE='/usr/local/bin/gmake'
+NINJA='/usr/local/bin/ninja'
+PACKAGING_COMMAND='brew'
+PIP3='/usr/local/bin/pip3'
+PKGS='bash bc bzip2 capstone ccache cpanminus ctags curl dbus diffutils dtc gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja perl pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy sparse spice-protocol tesseract texinfo usbredir vde vte3 zlib zstd'
+PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme virtualenv'
+PYTHON='/usr/local/bin/python3'
diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh
index 1f00281b44..e2f0bbd1b1 100755
--- a/tests/lcitool/refresh
+++ b/tests/lcitool/refresh
@@ -105,6 +105,7 @@ try:
    generate_cirrus("freebsd-12")
    generate_cirrus("freebsd-13")
    generate_cirrus("macos-11")
+   generate_cirrus("macos-12")
 
    sys.exit(0)
 except Exception as ex:





Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Daniel P . Berrangé
On Mon, Mar 07, 2022 at 12:17:53AM +0100, Philippe Mathieu-Daudé wrote:
> From: Philippe Mathieu-Daudé 
> 
> Add support for macOS 12 build on Cirrus-CI, similarly to commit
> 0e103a65ba1 ("gitlab: support for ... macOS 11 via cirrus-run"),
> but with the following differences:
>  - Enable modules (configure --enable-modules)
>  - Do not run softfloat3 tests (make check-softfloat)
>  - Run Aarch64 qtests instead of x86_64 ones
> 
> Generate the vars file by calling 'make lcitool-refresh'.
> 
> Signed-off-by: Philippe Mathieu-Daudé 
> ---
>  .gitlab-ci.d/cirrus.yml   | 16 
>  .gitlab-ci.d/cirrus/macos-12.vars | 16 
>  tests/lcitool/refresh |  1 +
>  3 files changed, 33 insertions(+)
>  create mode 100644 .gitlab-ci.d/cirrus/macos-12.vars

Reviewed-by: Daniel P. Berrangé 


Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|




Re: [PATCH v7 02/22] tests/fp/berkeley-testfloat-3: Ignore ignored #pragma directives

2022-03-09 Thread Alex Bennée


Philippe Mathieu-Daudé  writes:

> From: Philippe Mathieu-Daudé 
>
> Since we already use -Wno-unknown-pragmas, we can also use
> -Wno-ignored-pragmas. This silences hundred of warnings using
> clang 13 on macOS Monterey:
>
>   [409/771] Compiling C object 
> tests/fp/libtestfloat.a.p/berkeley-testfloat-3_source_test_az_f128_rx.c.o
>   ../tests/fp/berkeley-testfloat-3/source/test_az_f128_rx.c:49:14: warning: 
> '#pragma FENV_ACCESS' is not supported on this target - ignored 
> [-Wignored-pragmas]
>   #pragma STDC FENV_ACCESS ON
>^
>   1 warning generated.
>
> Having:
>
>   $ cc -v
>   Apple clang version 13.0.0 (clang-1300.0.29.30)
>
> Reported-by: Roman Bolshakov 
> Reviewed-by: Akihiko Odaki 
> Tested-by: Akihiko Odaki 
> Signed-off-by: Philippe Mathieu-Daudé 

Acked-by: Alex Bennée 

-- 
Alex Bennée



Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Thomas Huth

On 09/03/2022 11.24, Philippe Mathieu-Daudé wrote:

Hi Alex, Thomas, Daniel,

Could you ack this patch?


Basically fine for me, but can we really run additional cirrus-ci jobs by
default? IIRC the parallel execution of those was quite limited for the
free tier, so did you check closely that we won't run into additional
timeouts due to delayed cirrus-ci jobs?


 Thomas



On 7/3/22 00:17, Philippe Mathieu-Daudé wrote:

From: Philippe Mathieu-Daudé 

Add support for macOS 12 build on Cirrus-CI, similarly to commit
0e103a65ba1 ("gitlab: support for ... macOS 11 via cirrus-run"),
but with the following differences:
  - Enable modules (configure --enable-modules)
  - Do not run softfloat3 tests (make check-softfloat)
  - Run Aarch64 qtests instead of x86_64 ones

Generate the vars file by calling 'make lcitool-refresh'.

Signed-off-by: Philippe Mathieu-Daudé 
---
  .gitlab-ci.d/cirrus.yml   | 16 
  .gitlab-ci.d/cirrus/macos-12.vars | 16 
  tests/lcitool/refresh |  1 +
  3 files changed, 33 insertions(+)
  create mode 100644 .gitlab-ci.d/cirrus/macos-12.vars





Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Daniel P . Berrangé
On Wed, Mar 09, 2022 at 12:58:42PM +0100, Thomas Huth wrote:
> On 09/03/2022 11.24, Philippe Mathieu-Daudé wrote:
> > Hi Alex, Thomas, Daniel,
> > 
> > Could you ack this patch?
>
> Basically fine for me, but can we really run additional cirrus-ci jobs by
> default? IIRC the parallel execution of those was quite limited for the
> free tier, so did you check closely that we won't run into additional
> timeouts due to delayed cirrus-ci jobs?

You can run 2 jobs in parallel in Cirrus. Beyond that they
get queued/serialized.

We have a 1 hour job timeout.

We have to expect jobs will sometimes run slower than normal.

IOW if we have a job on Cirrus taking 30 minutes normally, we
expect it will sometimes take 45 minutes.

All this means that if we want Cirrus to be reliable and not
time out, we can really only have 2 jobs by default, unless
we can get the job execution time down to around 20 minutes
to allow for serialization.

We used to have terrible problems with cirrus timeouts when
we were running 4 jobs concurrently (2 on staging and 2 on
master). We addressed that in 9968de0a4a5470bd7b98dcd2fae5d5269908f16b
by disabling the jobs on master.

IOW, we really need to choose 1 macOS job and 1 FreeBSD job
and others need to be marked manual.
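
For illustration, marking a job manual in GitLab CI is a one-line rule.
A sketch only (it reuses the job name from the patch under discussion,
so treat the exact placement as an assumption, not the final config):

  x64-macos-12-base-build:
    extends: .cirrus_build_job
    rules:
      # Run only when triggered by hand; allow_failure avoids blocking
      # the overall pipeline result while the job stays unrun.
      - when: manual
        allow_failure: true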

Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|




Re: [PATCH v5 13/15] hw/nvme: Add support for the Virtualization Management command

2022-03-09 Thread Łukasz Gieryk
On Tue, Mar 01, 2022 at 02:07:08PM +0100, Klaus Jensen wrote:
> On Feb 17 18:45, Lukasz Maniak wrote:
> > From: Łukasz Gieryk 
> > 
> > With the new command one can:
> >  - assign flexible resources (queues, interrupts) to primary and
> >secondary controllers,
> >  - toggle the online/offline state of given controller.
> > 
> 
> QEMU segfaults (or asserts depending on the wind blowing) if the SR-IOV
> enabled device is hotplugged after being configured (i.e. follow the
> docs for a simple setup and then do a `device_del ` in the
> monitor. I suspect this is related to freeing the queues and something
> getting double-freed.
> 

I’ve finally found some time to look at the issue.

Long story short: the hot-plug mechanism deletes all VFs without the PF
knowing, then the PF tries to reset and delete the already non-existing
devices.

I have a solution for the problem, but there’s a high chance it’s not
the correct one. I’m still reading through the specs, as my knowledge in
the area of hot-plug/ACPI is quite limited.

Soon we will release the next patch set, with the fix included. I hope
the ACPI maintainers will chime in then. Till that happens, this is the
summary of my findings:

1) The current SR-IOV implementation assumes it’s the PF that creates
   and deletes VFs.
2) It’s a design decision (the Nvme device at least) for the VFs to be
   of the same class as PF. Effectively, they share the dc->hotpluggable
   value.
3) When a VF is created, it’s added as a child node to PF’s PCI bus
   slot.
4) Monitor/device_del triggers the ACPI mechanism. The implementation is
   not aware of SR/IOV and ejects PF’s PCI slot, directly unrealizing all
   hot-pluggable (!acpi_pcihp_pc_no_hotplug) child nodes.
5) VFs are unrealized directly, and it doesn’t work well with (1).
   SR/IOV structures are not updated, so when it’s PF’s turn to be
   unrealized, it works on stale pointers to already-deleted VFs.

My proposed ‘fix’ is to make the PCI ACPI code aware of SR/IOV:


diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index f4d706e47d..090bdb8e74 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -196,8 +196,12 @@ static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev)
      * ACPI doesn't allow hotplug of bridge devices.  Don't allow
      * hot-unplug of bridge devices unless they were added by hotplug
      * (and so, not described by acpi).
+     *
+     * Don't allow hot-unplug of SR-IOV Virtual Functions, as they
+     * will be removed implicitly, when Physical Function is unplugged.
      */
-    return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable;
+    return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable ||
+           pci_is_vf(dev);
 }




Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Philippe Mathieu-Daudé

On 9/3/22 13:33, Daniel P. Berrangé wrote:

On Wed, Mar 09, 2022 at 12:58:42PM +0100, Thomas Huth wrote:



Basically fine for me, but can we really run additional cirrus-ci jobs by
default? IIRC the parallel execution of those was quite limited for the
free tier, so did you check closely that we won't run into additional
timeouts due to delayed cirrus-ci jobs?


You can run 2 jobs in parallel in Cirrus. Beyond that they
get queued/serialized.

We have a 1 hour job timeout.

We have to expect jobs will sometimes run slower than normal.

IOW if we have a job on Cirrus taking 30 minutes normally, we
expect it will sometimes take 45 minutes.

All this means that if we want Cirrus to be reliable and not
time out, we can really only have 2 jobs by default, unless
we can get the job execution time down to around 20 minutes
to allow for serialization.

We used to have terrible problems with cirrus timeouts when
we were running 4 jobs concurrently (2 on staging and 2 on
master). We addressed that in 9968de0a4a5470bd7b98dcd2fae5d5269908f16b
by disabling the jobs on master.

IOW, we really need to choose 1 macOS job and 1 FreeBSD job
and others need to be marked manual.


Not sure which job to choose yet. Per the first google hits we
still want to cover Catalina first:
https://www.statista.com/statistics/944559/worldwide-macos-version-market-share/

Would it be beneficial to have one job per OS during PR, and
the other jobs run nightly (once a day, not per PR)?



Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Thomas Huth

On 09/03/2022 13.50, Philippe Mathieu-Daudé wrote:


Would it be beneficial to have one job per OS during PR, and
the other jobs run nightly (once a day, not per PR)?


Is there a way to trigger nightly runs in gitlab?

 Thomas





Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Daniel P . Berrangé
On Wed, Mar 09, 2022 at 01:52:15PM +0100, Thomas Huth wrote:
> On 09/03/2022 13.50, Philippe Mathieu-Daudé wrote:
> 
> > Would it be beneficial to have one job per OS during PR, and
> > the other jobs run nightly (once a day, not per PR)?
> 
> Is there a way to trigger nightly runs in gitlab?

Yes, in the repo CI settings create a scheduled job, using cron syntax.
Relies on the job "rules" in the config file not excluding scheduled
execution of course.
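
For example, a sketch of a rule that opts a job in to scheduled pipelines
only (hypothetical job name, not taken from the actual QEMU config):

  nightly-macos-build:
    extends: .cirrus_build_job
    rules:
      # Run when the pipeline was started by a schedule, never otherwise.
      - if: '$CI_PIPELINE_SOURCE == "schedule"'
        when: always
      - when: never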

Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|




Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Daniel P . Berrangé
On Wed, Mar 09, 2022 at 01:50:34PM +0100, Philippe Mathieu-Daudé wrote:
> On 9/3/22 13:33, Daniel P. Berrangé wrote:
> > On Wed, Mar 09, 2022 at 12:58:42PM +0100, Thomas Huth wrote:
> 
> > > Basically fine for me, but can we really run additional cirrus-ci jobs by
> > > default? IIRC the parallel execution of those was quite limited for the
> > > free tier, so did you check closely that we won't run into additional
> > > timeouts due to delayed cirrus-ci jobs?
> > 
> > You can run 2 jobs in parallel in Cirrus. Beyond that they
> > get queued/serialized.
> > 
> > We have a 1 hour job timeout.
> > 
> > We have to expect jobs will sometimes run slower than normal.
> > 
> > IOW if we have a job on Cirrus taking 30 minutes normally, we
> > expect it will sometimes take 45 minutes.
> > 
> > All this means that if we want Cirrus to be reliable and not
> > time out, we can really only have 2 jobs by default, unless
> > we can get the job execution time down to around 20 minutes
> > to allow for serialization.
> > 
> > We used to have terrible problems with cirrus timeouts when
> > we were running 4 jobs concurrently (2 on staging and 2 on
> > master). We addressed that in 9968de0a4a5470bd7b98dcd2fae5d5269908f16b
> > by disabling the jobs on master.
> > 
> > IOW, we really need to choose 1 macOS job and 1 FreeBSD job
> > and others need to be marked manual.
> 
> Not sure which job to choose yet. Per the first google hits we
> still want to cover Catalina first:
> https://www.statista.com/statistics/944559/worldwide-macos-version-market-share/

My general gut feeling is usually to prioritize testing older versions
as they tend to be more widely used, and we want to avoid regressions
on stuff that has been around the longest. Compatibility problems with
new releases tend to get reported by users/maintainers and would not
be regressions, but rather enhancements to support the new platform.

But it is not really an easy decision, as clearly we would prefer to
test both old and new if we had the ability.

Having one as a nightly job is a reasonable compromise. 

With regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|




Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Peter Maydell
On Wed, 9 Mar 2022 at 13:02, Daniel P. Berrangé  wrote:
>
> On Wed, Mar 09, 2022 at 01:50:34PM +0100, Philippe Mathieu-Daudé wrote:
> > Not sure which job to choose yet. Per the first google hits we
> > still want to cover Catalina first:
> > https://www.statista.com/statistics/944559/worldwide-macos-version-market-share/

...that page says that the figures for Catalina are "incorrectly
high", so not a very useful set of data for these purposes I think.

> My general gut feeling is usually to prioritize testing older versions
> as they tend to be more widely used, and we want to avoid regressions
> on stuff that has been around the longest. Compatibility problems with
> new releases tend to get reported by users/maintainers and would not
> be regressions, but rather enhancements to support the new platform.

Note that technically speaking Catalina (10.15) has already fallen off
the end of our support policy (which currently requires Big Sur (11)
and Monterey (12) support, as the two most recent versions). My
personal macos x86 laptop is still running Catalina, though,
because Big Sur dropped support for that hardware :-/
That suggests that maybe Big Sur would be what we should go for
now if we have to pick just one for the CI.

thanks
-- PMM



Re: [PATCH v2] block/nbd.c: Fixed IO request coroutine not being wakeup when kill NBD server

2022-03-09 Thread Vladimir Sementsov-Ogievskiy

09.03.2022 10:48, Rao Lei wrote:

During the IO stress test, there is a chance that the IO request coroutine
can't be awakened when the NBD server is killed.

The GDB stack is as follows:
(gdb) bt
0  0x7f2ff990cbf6 in __ppoll (fds=0x55575de85000, nfds=1, timeout=, sigmask=0x0) at ../sysdeps/unix/sysv/linux/ppoll.c:44
1  0x55575c302e7c in qemu_poll_ns (fds=0x55575de85000, nfds=1, timeout=59603140) at ../util/qemu-timer.c:348
2  0x55575c2d3c34 in fdmon_poll_wait (ctx=0x55575dc480f0, ready_list=0x7ffd9dd1dae0, timeout=59603140) at ../util/fdmon-poll.c:80
3  0x55575c2d350d in aio_poll (ctx=0x55575dc480f0, blocking=true) at ../util/aio-posix.c:655
4  0x55575c16eabd in bdrv_do_drained_begin (bs=0x55575dee7fe0, recursive=false, parent=0x0, ignore_bds_parents=false, poll=true) at ../block/io.c:474
5  0x55575c16eba6 in bdrv_drained_begin (bs=0x55575dee7fe0) at ../block/io.c:480
6  0x55575c1aff33 in quorum_del_child (bs=0x55575dee7fe0, child=0x55575dcea690, errp=0x7ffd9dd1dd08) at ../block/quorum.c:1130
7  0x55575c14239b in bdrv_del_child (parent_bs=0x55575dee7fe0, child=0x55575dcea690, errp=0x7ffd9dd1dd08) at ../block.c:7705
8  0x55575c12da28 in qmp_x_blockdev_change (parent=0x55575df404c0 "colo-disk0", has_child=true, child=0x55575de867f0 "children.1", has_node=false, node=0x0, errp=0x7ffd9dd1dd08) at ../blockdev.c:3676
9  0x55575c258435 in qmp_marshal_x_blockdev_change (args=0x7f2fec008190, ret=0x7f2ff7b0bd98, errp=0x7f2ff7b0bd90) at qapi/qapi-commands-block-core.c:1675
10 0x55575c2c6201 in do_qmp_dispatch_bh (opaque=0x7f2ff7b0be30) at ../qapi/qmp-dispatch.c:129
11 0x55575c2ebb1c in aio_bh_call (bh=0x55575dc429c0) at ../util/async.c:141
12 0x55575c2ebc2a in aio_bh_poll (ctx=0x55575dc480f0) at ../util/async.c:169
13 0x55575c2d2d96 in aio_dispatch (ctx=0x55575dc480f0) at ../util/aio-posix.c:415
14 0x55575c2ec07f in aio_ctx_dispatch (source=0x55575dc480f0, callback=0x0, user_data=0x0) at ../util/async.c:311
15 0x7f2ff9e7cfbd in g_main_context_dispatch () at /lib/x86_64-linux-gnu/libglib-2.0.so.0
16 0x55575c2fd581 in glib_pollfds_poll () at ../util/main-loop.c:232
17 0x55575c2fd5ff in os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:255
18 0x55575c2fd710 in main_loop_wait (nonblocking=0) at ../util/main-loop.c:531
19 0x55575bfa7588 in qemu_main_loop () at ../softmmu/runstate.c:726
20 0x55575bbee57a in main (argc=60, argv=0x7ffd9dd1e0e8, envp=0x7ffd9dd1e2d0) at ../softmmu/main.c:50

(gdb) qemu coroutine 0x55575e16aac0
0  0x55575c2ee7dc in qemu_coroutine_switch (from_=0x55575e16aac0, to_=0x7f2ff830fba0, action=COROUTINE_YIELD) at ../util/coroutine-ucontext.c:302
1  0x55575c2fe2a9 in qemu_coroutine_yield () at ../util/qemu-coroutine.c:195
2  0x55575c2fe93c in qemu_co_queue_wait_impl (queue=0x55575dc46170, lock=0x7f2b32ad9850) at ../util/qemu-coroutine-lock.c:56
3  0x55575c17ddfb in nbd_co_send_request (bs=0x55575ebfaf20, request=0x7f2b32ad9920, qiov=0x55575dfc15d8) at ../block/nbd.c:478
4  0x55575c17f931 in nbd_co_request (bs=0x55575ebfaf20, request=0x7f2b32ad9920, write_qiov=0x55575dfc15d8) at ../block/nbd.c:1182
5  0x55575c17fe14 in nbd_client_co_pwritev (bs=0x55575ebfaf20, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, flags=0) at ../block/nbd.c:1284
6  0x55575c170d25 in bdrv_driver_pwritev (bs=0x55575ebfaf20, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:1264
7  0x55575c1733b4 in bdrv_aligned_pwritev (child=0x55575dff6890, req=0x7f2b32ad9ad0, offset=403487858688, bytes=4538368, align=1, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:2126
8  0x55575c173c67 in bdrv_co_pwritev_part (child=0x55575dff6890, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:2314
9  0x55575c17391b in bdrv_co_pwritev (child=0x55575dff6890, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, flags=0) at ../block/io.c:2233
10 0x55575c1ee506 in replication_co_writev (bs=0x55575e9824f0, sector_num=788062224, remaining_sectors=8864, qiov=0x55575dfc15d8, flags=0) at ../block/replication.c:270
11 0x55575c170eed in bdrv_driver_pwritev (bs=0x55575e9824f0, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:1297
12 0x55575c1733b4 in bdrv_aligned_pwritev (child=0x55575dcea690, req=0x7f2b32ad9e00, offset=403487858688, bytes=4538368, align=512, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:2126
13 0x55575c173c67 in bdrv_co_pwritev_part (child=0x55575dcea690, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, qiov_offset=0, flags=0) at ../block/io.c:2314
14 0x55575c17391b in bdrv_co_pwritev (child=0x55575dcea690, offset=403487858688, bytes=4538368, qiov=0x55575dfc15d8, flags=0) at ../block/io.c:2233
15 0x55575c1aeffa

Re: [RFC PATCH 0/5] Removal of AioContext lock, bs->parents and ->children: proof of concept

2022-03-09 Thread Emanuele Giuseppe Esposito



On 02/03/2022 12:07, Vladimir Sementsov-Ogievskiy wrote:
> 01.03.2022 17:21, Emanuele Giuseppe Esposito wrote:
>> This serie tries to provide a proof of concept and a clear explanation
>> on why we need to use drains (and more precisely subtree_drains)
>> to replace the aiocontext lock, especially to protect BlockDriverState
>> ->children and ->parent lists.
>>
>> Just a small recap on the key concepts:
>> * We split block layer APIs in "global state" (GS), "I/O", and
>> "global state or I/O".
>>    GS are running in the main loop, under BQL, and are the only
>>    one allowed to modify the BlockDriverState graph.
>>
>>    I/O APIs are thread safe and can run in any thread
>>
>>    "global state or I/O" are essentially all APIs that use
>>    BDRV_POLL_WHILE. This is because there can be only 2 threads
>>    that can use BDRV_POLL_WHILE: main loop and the iothread that
>>    runs the aiocontext.
>>
>> * Drains allow the caller (either main loop or iothread running
>> the context) to wait for all in-flight requests and operations
>> of a BDS: normal drains target a given node and its parents, while
>> subtree ones also include the subgraph of the node. Siblings are
>> not affected by either of these two kinds of drains.
>> After bdrv_drained_begin, no more requests are allowed to come
>> from the affected nodes. Therefore the only actor left working
>> on a drained part of the graph should be the main loop.
>>
>> What do we intend to do
>> ---
>> We want to remove the AioContext lock. It is not 100% clear how
>> many things we are protecting with it, and why.
>> As a starter, we want to protect BlockDriverState ->parents and
>> ->children lists, since they are read by main loop and I/O but
>> only written by main loop under BQL. The function that modifies
>> these lists is bdrv_replace_child_common().
>>
>> How do we want to do it
>> ---
>> We identified the subtree_drain API as the ideal substitute for the
>> AioContext lock. The reason is simple: draining prevents the
>> iothread from reading or writing the nodes, so once the main loop finishes
> 
> I'm not sure it's ideal. Unfortunately I'm not really good in all that
> BQL, AioContext locks and drains. So, I can't give good advice. But here
> are my doubts:
> 
> Draining is a very restrictive measure: even if the drained section is very
> short, at least on bdrv_drained_begin() we have to wait for all current
> requests, and not start new ones. That slows down the guest.

I don't think we are in a critical path where performance is important here.

> At the
> same time there are operations that don't require stopping guest IO
> requests. For example manipulation with dirty bitmaps - qmp commands
> block-dirty-bitmap-add block-dirty-bitmap-remove. Or different query
> requests..
>

Maybe you misunderstood or I was not 100% clear, but I am talking about
replacing the AioContext lock for the ->parents and ->children lists. Not
everywhere. This is the first step, and then we will see if the additional
things that it protects can use drain or something else.

 
> I see only two real cases, where we do need drain:
> 
> 1. When we need a consistent "point-in-time". For example, when we start
> backup in transaction with some dirty-bitmap manipulation commands.
> 
> 2. When we need to modify block-graph: if we are going to break relation
> A -> B, there must not be any in-flight request that wants to use this
> relation.

That's the use case I am considering.
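
To make case 2 concrete, here is a minimal sketch (my illustration, not
code from the series) of a graph change protected by subtree drains,
assuming the existing bdrv_subtree_drained_begin/end() API:

static void replace_child_drained(BlockDriverState *old_bs,
                                  BlockDriverState *new_bs)
{
    /* Quiesce all in-flight I/O in both affected subgraphs. */
    bdrv_subtree_drained_begin(old_bs);
    bdrv_subtree_drained_begin(new_bs);

    /* Only the main loop (under BQL) is left running here, so the
     * ->parents and ->children lists can be rewritten safely, e.g.
     * by bdrv_replace_child_common(). */

    bdrv_subtree_drained_end(new_bs);
    bdrv_subtree_drained_end(old_bs);
}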
> 
> All other operations, for which we want some kind of lock (like
> AioContext lock or something) we actually don't want to stop guest IO.

Yes, they have to be analyzed case by case.
> 
> 
> Next, I have a problem in mind that in the past led to a lot of iotest 30
> failures. Next there were different fixes and improvements, but the core
> problem (as far as I understand) is still here: nothing protects us when
> we are in some graph modification process (for example block-job
> finalization) that may yield, switch to another coroutine and enter another
> graph modification process (for example, another block-job finalization)..

That's another point to consider. I don't really have a solution for this.

> (for details look at my old "[PATCH RFC 0/5] Fix accidental crash in
> iotest 30" 
> https://lists.nongnu.org/archive/html/qemu-devel/2020-11/msg05290.html ,
> where I suggested to add a global graph_modify_mutex CoMutex, to be held
> during graph-modifying process that may yield)..
> Does your proposal solve this problem?
> 
> 
>> executing bdrv_drained_begin() on the interested graph, we are sure that
>> the iothread is not going to look or even interfere with that part of
>> the graph.
>> We are also sure that the only two actors that can look at a specific
>> BlockDriverState in any given context are the main loop and the
>> iothread running the AioContext (ensured by "global state or IO" logic).
>>
>> Why use _subtree_ instead of normal drain
>> --

Re: [RFC PATCH 0/5] Removal of AioContext lock, bs->parents and ->children: proof of concept

2022-03-09 Thread Emanuele Giuseppe Esposito



On 02/03/2022 10:47, Stefan Hajnoczi wrote:
> On Tue, Mar 01, 2022 at 09:21:08AM -0500, Emanuele Giuseppe Esposito wrote:
>> This serie tries to provide a proof of concept and a clear explanation
>> on why we need to use drains (and more precisely subtree_drains)
>> to replace the aiocontext lock, especially to protect BlockDriverState
>> ->children and ->parent lists.
>>
>> Just a small recap on the key concepts:
>> * We split block layer APIs in "global state" (GS), "I/O", and
>> "global state or I/O".
>>   GS are running in the main loop, under BQL, and are the only
>>   one allowed to modify the BlockDriverState graph.
>>
>>   I/O APIs are thread safe and can run in any thread
>>
>>   "global state or I/O" are essentially all APIs that use
>>   BDRV_POLL_WHILE. This is because there can be only 2 threads
>>   that can use BDRV_POLL_WHILE: main loop and the iothread that
>>   runs the aiocontext.
>>
>> * Drains allow the caller (either main loop or iothread running
>> the context) to wait for all in-flight requests and operations
>> of a BDS: normal drains target a given node and its parents, while
>> subtree ones also include the subgraph of the node. Siblings are
>> not affected by either of these two kinds of drains.
> 
> Siblings are drained to the extent required for their parent node to
> reach in_flight == 0.
> 
> I haven't checked the code but I guess the case you're alluding to is
> that siblings with multiple parents could have other I/O in flight that
> will not be drained and further I/O can be submitted after the parent
> has drained?

Yes, this in theory can happen. I don't really know if this happens
in practice, and how likely it is to happen.

The alternative would be to make a drain that blocks the whole graph,
siblings included, but that would probably be overkill.

> 
>> After bdrv_drained_begin, no more requests are allowed to come
>> from the affected nodes. Therefore the only actor left working
>> on a drained part of the graph should be the main loop.
> 
> It's worth remembering that this invariant is not enforced. Draining is
> a cooperative scheme. Everything *should* be notified and stop
> submitting I/O, but there is no assertion or return -EBUSY if a request
> is submitted while the BDS is drained.

Yes, it is a cooperative scheme, and all except mirror (fixed in the
last patch) seem to follow the invariant.
> 
> If the thread that drained the BDS wants, I think it can (legally)
> submit I/O within the drained section.
> 

Yes, but at some point the main loop drains too before changing the
graph. Then the thread must be stopped, according to the invariant
above. So it doesn't matter whether the thread has already drained or not.

>> What do we intend to do
>> ---
>> We want to remove the AioContext lock. It is not 100% clear how
>> many things we are protecting with it, and why.
>> As a starter, we want to protect BlockDriverState ->parents and
>> ->children lists, since they are read by main loop and I/O but
>> only written by main loop under BQL. The function that modifies
>> these lists is bdrv_replace_child_common().
>>
>> How do we want to do it
>> ---
>> We identified the subtree_drain API as the ideal substitute for the
>> AioContext lock. The reason is simple: draining prevents the iothread
>> from reading or writing the nodes, so once the main loop finishes
>> executing bdrv_drained_begin() on the interested graph, we are sure that
>> the iothread is not going to look or even interfere with that part of the 
>> graph.
>> We are also sure that the only two actors that can look at a specific
>> BlockDriverState in any given context are the main loop and the
>> iothread running the AioContext (ensured by "global state or IO" logic).
>>
>> Why use _subtree_ instead of normal drain
>> -
>> A simple drain "blocks" a given node and all its parents.
>> But it doesn't touch the child.
>> This means that if we use a simple drain, a child can always
>> keep processing requests, and eventually end up calling
>> bdrv_drained_begin itself, ending up reading the parent nodes while
>> the main loop is modifying them. Therefore a subtree drain is necessary.
>>
>> Possible scenarios
>> ---
>> Keeping in mind that we can only have an iothread and the main loop
>> draining on a certain node, we could have:
>>
>> main loop successfully drains and then iothread tries to drain:
>>   impossible scenario, as iothread is already stopped once main
>>   successfully drains.
>>
>> iothread successfully drains and then main loop drains:
>>   should not be a problem, as:
>>   1) the iothread should be already "blocked" by its own drain
> 
> Once drained_begin() returns in the IOThread, the IOThread can do
> anything it wants, including more submitting I/O. I don't consider that
> "blocked", so I'm not sure what this sentence means?
> 
> The way the main loop thread protects itself against the IOThread is via
> the a

Re: [PATCH v7 22/22] gitlab-ci: Support macOS 12 via cirrus-run

2022-03-09 Thread Philippe Mathieu-Daudé

On 9/3/22 14:09, Peter Maydell wrote:

On Wed, 9 Mar 2022 at 13:02, Daniel P. Berrangé  wrote:


On Wed, Mar 09, 2022 at 01:50:34PM +0100, Philippe Mathieu-Daudé wrote:

Not sure which job to choose yet. Per the first google hits we
still want to cover Catalina first:
https://www.statista.com/statistics/944559/worldwide-macos-version-market-share/


...that page says that the figures for Catalina are "incorrectly
high", so not a very useful set of data for these purposes I think.


My general gut feeling is usually to prioritize testing older versions
as they tend to be more widely used, and we want to avoid regresions
on stuff that has been around the longest. Compatibility problems with
new releases tend to get reported by users/maintainers and would not
be regressions, but rather enhancements to support the new platform.


Note that technically speaking Catalina (10.15) has already fallen off
the end of our support policy (which currently requires Big Sur (11)
and Monterey (12) support, as the two most recent versions). My
personal macos x86 laptop is still running Catalina, though,
because Big Sur dropped support for that hardware :-/
That suggests that maybe Big Sur would be what we should go for
now if we have to pick just one for the CI.


OK. Since I'm running Monterey (12) on my workstation, I'll make this
job manual / nightly, and keep the Big Sur (11) on CI.

I'll let Daniel / Thomas / Alex decide which FreeBSD job to demote to
manual.

Regards,

Phil.



Re: [RFC PATCH 2/5] introduce BDRV_POLL_WHILE_UNLOCKED

2022-03-09 Thread Eric Blake
On Tue, Mar 01, 2022 at 09:21:10AM -0500, Emanuele Giuseppe Esposito wrote:
> Same as BDRV_POLL_WHILE, but uses AIO_WAIT_WHILE_UNLOCKED.
> See doc comment for more info.

This sentence implies there is a doc comment...

> 
> Signed-off-by: Emanuele Giuseppe Esposito 
> ---
>  include/block/block.h | 5 +
>  1 file changed, 5 insertions(+)
> 
> diff --git a/include/block/block.h b/include/block/block.h
> index e1713ee306..5a7a850c16 100644
> --- a/include/block/block.h
> +++ b/include/block/block.h
> @@ -512,6 +512,11 @@ void bdrv_drain_all(void);
>  AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),  \
> cond); })
>  
> +#define BDRV_POLL_WHILE_UNLOCKED(bs, cond) ({  \
> +BlockDriverState *bs_ = (bs);  \
> +AIO_WAIT_WHILE_UNLOCKED(bdrv_get_aio_context(bs_), \
> +cond); })
> +

but none is added here.  I'm presuming that the comment is already in
the file (not shown in the limited context view here), but it may be
worth tweaking the commit message to mention that.
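
As an aside, a hypothetical call site (my sketch, not from the series)
would mirror BDRV_POLL_WHILE, except that the caller must not hold the
AioContext lock; req here is an assumed request object tracked by the
caller:

    /* Wait for completion without holding bs's AioContext lock. */
    BDRV_POLL_WHILE_UNLOCKED(bs, qatomic_read(&req->in_flight) > 0);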

>  int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
> int64_t bytes);
>  int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
> -- 
> 2.31.1
> 
> 

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.   +1-919-301-3266
Virtualization:  qemu.org | libvirt.org




Re: [PATCH] tests/qemu-iotests: Use GNU sed in two more spots where it is necessary

2022-03-09 Thread Eric Blake
On Wed, Mar 09, 2022 at 11:16:26AM +0100, Thomas Huth wrote:
> These two spots have been missed in commit 9086c7639822 ("Rework the
> checks and spots using GNU sed") - they need GNU sed, too, since they
> are using the "+" address form.
> 
> Signed-off-by: Thomas Huth 
> ---
>  tests/qemu-iotests/common.filter | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)

Simpler than rewriting as [0-9][0-9]* for non-GNU sed.
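
For illustration, a quick shell sketch of the difference (my example, not
part of the patch; gsed is GNU sed as installed by Homebrew on macOS):

  # GNU sed understands the \+ (one-or-more) operator:
  echo '"len": 12345,' | gsed -e 's/[0-9]\+/N/'     # -> "len": N,
  # Portable POSIX sed needs the doubled atom instead:
  echo '"len": 12345,' | sed -e 's/[0-9][0-9]*/N/'  # -> "len": N,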

Reviewed-by: Eric Blake 

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.   +1-919-301-3266
Virtualization:  qemu.org | libvirt.org




Re: [PATCH 2/3] block/copy-before-write: add on-cbw-error open parameter

2022-03-09 Thread Vladimir Sementsov-Ogievskiy

01.03.2022 23:59, Vladimir Sementsov-Ogievskiy wrote:

@@ -273,9 +311,9 @@ cbw_co_snapshot_block_status(BlockDriverState *bs,
         assert(ret & BDRV_BLOCK_ALLOCATED);
     }
 
-    cbw_snapshot_read_unlock(bs, req);
+    ret2 = cbw_snapshot_read_unlock(bs, req);
 
-    return ret;
+    return ret < 0 ? ret : ret2;


Oops. On success we should return ret, as it's a block-status value.


--
Best regards,
Vladimir



Re: [PATCH v2 0/6] Support exporting BDSs via VDUSE

2022-03-09 Thread Stefan Hajnoczi
On Tue, Feb 15, 2022 at 06:59:37PM +0800, Xie Yongji wrote:
> Hi all,
> 
> Last few months ago, VDUSE (vDPA Device in Userspace) [1] has
> been merged into Linux kernel as a framework that make it
> possible to emulate a vDPA device in userspace. This series
> aimed at implementing a VDUSE block backend based on the
> qemu-storage-daemon infrastructure.
> 
> To support that, we firstly introduce a VDUSE library as a
> subproject (like what libvhost-user does) to help implementing
> VDUSE backends in QEMU. Then a VDUSE block export is implemented
> based on this library. At last, we add resize and reconnect support
> to the VDUSE block export and VDUSE library.
> 
> Since we don't support vdpa-blk in QEMU currently, the VM case is
> tested with my previous patchset [2].
> 
> [1] https://www.kernel.org/doc/html/latest/userspace-api/vduse.html
> [2] https://www.mail-archive.com/qemu-devel@nongnu.org/msg797569.html
> 
> Please review, thanks!

Sorry for the delay. I will review this on Monday.

Stefan




Re: [PATCH] tests/qemu-iotests: Use GNU sed in two more spots where it is necessary

2022-03-09 Thread Philippe Mathieu-Daudé

On 9/3/22 11:16, Thomas Huth wrote:

These two spots have been missed in commit 9086c7639822 ("Rework the
checks and spots using GNU sed") - they need GNU sed, too, since they
are using the "+" address form.

Signed-off-by: Thomas Huth 
---
  tests/qemu-iotests/common.filter | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)


Reviewed-by: Philippe Mathieu-Daudé 



Re: [PATCH 2/4] block/block-copy: block_copy(): add timeout_ns parameter

2022-03-09 Thread Vladimir Sementsov-Ogievskiy

02.03.2022 19:24, Vladimir Sementsov-Ogievskiy wrote:

Add possibility to limit block_copy() call in time. To be used in the
next commit.

Signed-off-by: Vladimir Sementsov-Ogievskiy
---
  include/block/block-copy.h |  2 +-
  block/block-copy.c | 28 
  2 files changed, 21 insertions(+), 9 deletions(-)


The block_copy() call in block/copy-before-write.c should be updated so that
this commit does not break compilation.

--
Best regards,
Vladimir



[PATCH v4] tests: Do not treat the iotests as separate meson test target anymore

2022-03-09 Thread Thomas Huth
If there is a failing iotest, the output is currently not logged to
the console anymore. To get this working again, we need to run the
meson test runner with "--print-errorlogs" (and without "--verbose"
due to a current meson bug that will be fixed here:
https://github.com/mesonbuild/meson/commit/c3f145ca2b9f5.patch ).
We could update the "meson test" call in tests/Makefile.include,
but actually it's nicer and easier if we simply do not treat the
iotests as separate test target anymore and integrate them along
with the other test suites. This has the disadvantage of not getting
the detailed progress indication there anymore, but since that was
only working right in single-threaded "make -j1" mode anyway, it's
not a huge loss right now.

Signed-off-by: Thomas Huth 
---
 v4: updated commit description

 meson.build| 6 +++---
 scripts/mtest2make.py  | 4 
 tests/Makefile.include | 9 +
 3 files changed, 4 insertions(+), 15 deletions(-)

diff --git a/meson.build b/meson.build
index 2d6601467f..4f5b9f5ec5 100644
--- a/meson.build
+++ b/meson.build
@@ -3,9 +3,9 @@ project('qemu', ['c'], meson_version: '>=0.59.3',
                           'b_staticpic=false', 'stdsplit=false'],
         version: files('VERSION'))
 
-add_test_setup('quick', exclude_suites: ['block', 'slow', 'thorough'], is_default: true)
-add_test_setup('slow', exclude_suites: ['block', 'thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow'])
-add_test_setup('thorough', exclude_suites: ['block'], env: ['G_TEST_SLOW=1', 'SPEED=thorough'])
+add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true)
+add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow'])
+add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough'])
 
 not_found = dependency('', required: false)
 keyval = import('keyval')
diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py
index 4d542e8aaa..304634b71e 100644
--- a/scripts/mtest2make.py
+++ b/scripts/mtest2make.py
@@ -101,10 +101,6 @@ def emit_suite(name, suite, prefix):
 testsuites = defaultdict(Suite)
 for test in introspect['tests']:
     process_tests(test, targets, testsuites)
-# HACK: check-block is a separate target so that it runs with --verbose;
-# only write the dependencies
-emit_suite_deps('block', testsuites['block'], 'check')
-del testsuites['block']
 emit_prolog(testsuites, 'check')
 for name, suite in testsuites.items():
     emit_suite(name, suite, 'check')
diff --git a/tests/Makefile.include b/tests/Makefile.include
index e7153c8e91..b89018cdcc 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -147,16 +147,9 @@ check-acceptance: check-acceptance-deprecated-warning | check-avocado
 
 # Consolidated targets
 
-.PHONY: check-block check check-clean get-vm-images
+.PHONY: check check-clean get-vm-images
 check:
 
-ifneq ($(.check-block.deps),)
-check: check-block
-check-block: run-ninja
-	$(if $(MAKE.n),,+)$(MESON) test $(MTESTARGS) $(.mtestargs) --verbose \
-		--logbase iotestslog $(call .speed.$(SPEED), block block-slow block-thorough)
-endif
-
 check-build: run-ninja
 
 check-clean:
-- 
2.27.0