Re: [PATCH 04/10] tests/avocado: machine aarch64: standardize location and RO/RW access

2023-12-13 Thread Cleber Rosa
Alex Bennée  writes:

> Cleber Rosa  writes:
>
>> The tests under machine_aarch64_virt.py do not need read-write access
>> to the ISOs.  The ones under machine_aarch64_sbsaref.py, on the other
> hand, will need read-write access, so let's give each test a unique
>> file.
>
> I think we are making two separate changes here so probably best split
> the patch.
>

Sure, but, do you mean separating the "readonly=on" and the "writable
file" changes?  Or separating those two from the ISO url code style
change?

>> And while at it, let's use a single code style and hash for the ISO
>> url.
>>
>> Signed-off-by: Cleber Rosa 
>> ---
>>  tests/avocado/machine_aarch64_sbsaref.py |  9 +++--
>>  tests/avocado/machine_aarch64_virt.py| 14 +++---
>>  2 files changed, 14 insertions(+), 9 deletions(-)
>>
>> diff --git a/tests/avocado/machine_aarch64_sbsaref.py 
>> b/tests/avocado/machine_aarch64_sbsaref.py
>> index 528c7d2934..6ae84d77ac 100644
>> --- a/tests/avocado/machine_aarch64_sbsaref.py
>> +++ b/tests/avocado/machine_aarch64_sbsaref.py
>> @@ -7,6 +7,7 @@
>>  # SPDX-License-Identifier: GPL-2.0-or-later
>>  
>>  import os
>> +import shutil
>>  
>>  from avocado import skipUnless
>>  from avocado.utils import archive
>> @@ -123,13 +124,15 @@ def boot_alpine_linux(self, cpu):
>>  
>>  iso_hash = 
>> "5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027"
>>  iso_path = self.fetch_asset(iso_url, algorithm="sha256", 
>> asset_hash=iso_hash)
>> +iso_path_rw = os.path.join(self.workdir, os.path.basename(iso_path))
>> +shutil.copy(iso_path, iso_path_rw)
>>  
>>  self.vm.set_console()
>>  self.vm.add_args(
>>  "-cpu",
>>  cpu,
>>  "-drive",
>> -f"file={iso_path},format=raw",
>> +f"file={iso_path_rw},format=raw",
>
> Instead of copying why not add ",snapshot=on" to preserve the original
> image. We don't want to persist data between tests.
>
>>  "-device",
>>  "virtio-rng-pci,rng=rng0",
>>  "-object",
>> @@ -170,13 +173,15 @@ def boot_openbsd73(self, cpu):
>>  
>>  img_hash = 
>> "7fc2c75401d6f01fbfa25f4953f72ad7d7c18650056d30755c44b9c129b707e5"
>>  img_path = self.fetch_asset(img_url, algorithm="sha256", 
>> asset_hash=img_hash)
>> +img_path_rw = os.path.join(self.workdir, os.path.basename(img_path))
>> +shutil.copy(img_path, img_path_rw)
>>  
>>  self.vm.set_console()
>>  self.vm.add_args(
>>  "-cpu",
>>  cpu,
>>  "-drive",
>> -f"file={img_path},format=raw",
>> +f"file={img_path_rw},format=raw",
>
> ditto.
>
>
>>  "-device",
>>  "virtio-rng-pci,rng=rng0",
>>  "-object",
>> diff --git a/tests/avocado/machine_aarch64_virt.py 
>> b/tests/avocado/machine_aarch64_virt.py
>> index a90dc6ff4b..093d68f837 100644
>> --- a/tests/avocado/machine_aarch64_virt.py
>> +++ b/tests/avocado/machine_aarch64_virt.py
>> @@ -37,13 +37,13 @@ def test_alpine_virt_tcg_gic_max(self):
>>  :avocado: tags=machine:virt
>>  :avocado: tags=accel:tcg
>>  """
>> -iso_url = ('https://dl-cdn.alpinelinux.org/'
>> -   'alpine/v3.17/releases/aarch64/'
>> -   'alpine-standard-3.17.2-aarch64.iso')
>> +iso_url = (
>> +"https://dl-cdn.alpinelinux.org/"
>> +
>> "alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso"
>> +)
>>  
>> -# Alpine use sha256 so I recalculated this myself
>> -iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839'
>> -iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1)
>> +iso_hash = 
>> "5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027"
>> +iso_path = self.fetch_asset(iso_url, algorithm="sha256", 
>> asset_hash=iso_hash)
>>  
>>  self.vm.set_console()
>>  kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
>> @@ -60,7 +60,7 @@ def test_alpine_virt_tcg_gic_max(self):
>>  self.vm.add_args("-smp", "2", "-m", "1024")
>>  self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
>> 'edk2-aarch64-code.fd'))
>> -self.vm.add_args("-drive", f"file={iso_path},format=raw")
>> +self.vm.add_args("-drive",
>>  f"file={iso_path},readonly=on,format=raw")
>
> Perhaps we can set ",media=cdrom" here.
>

Yes, but more importantly, adding both "readonly=on" and "media=cdrom"
to the tests under machine_aarch64_sbsaref.py does the trick.  Now, the
behavior explained in my previous response still warrants investigation
IMO.

Thanks
- Cleber.




Re: [PATCH 04/10] tests/avocado: machine aarch64: standardize location and RO/RW access

2023-12-13 Thread Cleber Rosa
Marcin Juszkiewicz  writes:

> W dniu 8.12.2023 o 20:09, Cleber Rosa pisze:
>> The tests under machine_aarch64_virt.py do not need read-write access
>> to the ISOs.  The ones under machine_aarch64_sbsaref.py, on the other
>> hand, will need read-write access, so let's give each test a unique
>> file.
>> 
>> And while at it, let's use a single code style and hash for the ISO
>> url.
>> 
>> Signed-off-by: Cleber Rosa
>
> It is ISO file, so sbsa-ref tests should be fine with readonly as well.
>
> Nothing gets installed so nothing is written. We only test whether boot works.

That was my original expectation too.  But, with nothing but the
following change:

diff --git a/tests/avocado/machine_aarch64_sbsaref.py 
b/tests/avocado/machine_aarch64_sbsaref.py
index 528c7d2934..436da4b156 100644
--- a/tests/avocado/machine_aarch64_sbsaref.py
+++ b/tests/avocado/machine_aarch64_sbsaref.py
@@ -129,7 +129,7 @@ def boot_alpine_linux(self, cpu):
 "-cpu",
 cpu,
 "-drive",
-f"file={iso_path},format=raw",
+f"file={iso_path},readonly=on,format=raw",
 "-device",
 "virtio-rng-pci,rng=rng0",
 "-object",

We get:

15:55:10 DEBUG| VM launch command: './qemu-system-aarch64 -display none -vga 
none -chardev socket,id=mon,fd=15 -mon chardev=mon,mode=control -machine 
sbsa-ref -
chardev socket,id=console,fd=20 -serial chardev:console -cpu cortex-a57 -drive 
if=pflash,file=/home/cleber/avocado/job-results/job-2023-12-13T15.55-28ef2b5/test
-results/tmp_dirx8p5xzt4/1-tests_avocado_machine_aarch64_sbsaref.py_Aarch64SbsarefMachine.test_sbsaref_alpine_linux_cortex_a57/SBSA_FLASH0.fd,format=raw
 -drive 
if=pflash,file=/home/cleber/avocado/job-results/job-2023-12-13T15.55-28ef2b5/test-results/tmp_dirx8p5xzt4/1-tests_avocado_machine_aarch64_sbsaref.py_Aarch64Sbsa
refMachine.test_sbsaref_alpine_linux_cortex_a57/SBSA_FLASH1.fd,format=raw -smp 
1 -machine sbsa-ref -cpu cortex-a57 -drive 
file=/home/cleber/avocado/data/cache/b
y_location/0154b7cd3a4f5e135299060c8cabbeec10b70b6d/alpine-standard-3.17.2-aarch64.iso,readonly=on,format=raw
 -device virtio-rng-pci,rng=rng0 -object rng-random
,id=rng0,filename=/dev/urandom'

Followed by:

15:55:10 DEBUG| Failed to establish session:
  | Traceback (most recent call last):
  |   File "/home/cleber/src/qemu/python/qemu/qmp/protocol.py", line 425, in 
_session_guard
  | await coro
  |   File "/home/cleber/src/qemu/python/qemu/qmp/qmp_client.py", line 253, in 
_establish_session
  | await self._negotiate()
  |   File "/home/cleber/src/qemu/python/qemu/qmp/qmp_client.py", line 305, in 
_negotiate
  | reply = await self._recv()
  | ^^
  |   File "/home/cleber/src/qemu/python/qemu/qmp/protocol.py", line 1009, in 
_recv
  | message = await self._do_recv()
  |   ^
  |   File "/home/cleber/src/qemu/python/qemu/qmp/qmp_client.py", line 402, in 
_do_recv
  | msg_bytes = await self._readline()
  | ^^
  |   File "/home/cleber/src/qemu/python/qemu/qmp/protocol.py", line 977, in 
_readline
  | raise EOFError
  | EOFError

With qemu-system-aarch64 producing on stdout:

   qemu-system-aarch64: Block node is read-only

Any ideas on the reason or cause?

Thanks,
- Cleber.




Re: [PATCH 03/10] tests/avocado/intel_iommu.py: increase timeout

2023-12-13 Thread Cleber Rosa
Alex Bennée  writes:

> Cleber Rosa  writes:
>
>> Based on many runs, the average run time for these 4 tests is around
>> 250 seconds, with 320 seconds being the ceiling.  In any way, the
>> default 120 seconds timeout is inappropriate in my experience.
>
> I would rather see these tests updated to fix:
>
>  - Don't use such an old Fedora 31 image

I remember proposing a bump in Fedora version used by default in
avocado_qemu.LinuxTest (which would propagate to tests such as
boot_linux.py and others), but that was not well accepted.  I can
definitely work on such a version bump again.

>  - Avoid updating image packages (when will RH stop serving them?)

IIUC the only reason for updating the packages is to test the network
from the guest, and could/should be done another way.

Eric, could you confirm this?

>  - The "test" is a fairly basic check of dmesg/sysfs output

Maybe the network is also an implicit check here.  Let's see what Eric
has to say.

>
> I think building a buildroot image with the tools pre-installed (with
> perhaps more testing) would be a better use of our limited test time.
>
> FWIW the runtime on my machine is:
>
> ➜  env QEMU_TEST_FLAKY_TESTS=1 ./pyvenv/bin/avocado run 
> ./tests/avocado/intel_iommu.py
> JOB ID : 5c582ccf274f3aee279c2208f969a7af8ceb9943
> JOB LOG: 
> /home/alex/avocado/job-results/job-2023-12-11T16.53-5c582cc/job.log
>  (1/4) ./tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu: PASS 
> (44.21 s)
>  (2/4) ./tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_strict: 
> PASS (78.60 s)
>  (3/4) ./tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_strict_cm: 
> PASS (65.57 s)
>  (4/4) ./tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_pt: PASS 
> (66.63 s)
> RESULTS: PASS 4 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | 
> CANCEL 0
> JOB TIME   : 255.43 s
>

Yes, I've also seen similar runtimes in other environments... so it
looks like it depends a lot on the "dnf -y install numactl-devel".  If
that can be removed, the tests would have much more predictable runtimes.




Re: [PATCH 02/10] tests/avocado: mips: add hint for fetchasset plugin

2023-12-13 Thread Cleber Rosa
Akihiko Odaki  writes:

> On 2023/12/09 4:09, Cleber Rosa wrote:
>> Avocado's fetchasset plugin runs before the actual Avocado job (and
>> any test).  It analyses the test's code looking for occurrences of
>> "self.fetch_asset()" in the either the actual test or setUp() method.
>> It's not able to fully analyze all code, though.
>> 
>> The way these tests are written makes the fetchasset plugin blind to
>> the assets.  This adds redundant code, true, but one that doesn't hurt
>> the test and aids the fetchasset plugin to download or verify the
>> existence of these assets in advance.
>> 
>> Signed-off-by: Cleber Rosa 
>
> Why not delete fetch_asset() in do_test_mips_malta32el_nanomips()?

I was trying to preserve do_test_mips_malta32el_nanomips() in such a way
that with the eventual migration to the "dependency" system in newer
Avocado, the lines added here could simply be reversed.

But, that's not a strong enough reason to justify the duplication.  I'll
follow your suggestion on v2.

Thanks!
- Cleber.




[PATCH 00/10] for-8.3 tests/avocado: prep for Avocado 103.0 LTS

2023-12-08 Thread Cleber Rosa
This is a collection of improvements to a number of Avocado based
tests, but also fixes that will allow them to behave properly under
Avocado's upcoming new Long Term Stability release (LTS) version
103.0.

A pipeline with (pretty much) these changes can be seen at:
  - https://gitlab.com/cleber.gnu/qemu/-/pipelines/1096168899

While a pipeline with the Avocado version bump (using a preview of the
103.0 release) can be seen at:
  - https://gitlab.com/cleber.gnu/qemu/-/pipelines/1099488480

Once Avocado officially releases 103.0 LTS, which is expected to take
no longer than 2 weeks (after a huge development window), the actual
version bump will be posted, along with more profound changes to the
tests to leverage the new features.

Cleber Rosa (10):
  tests/avocado: mips: fallback to HTTP given certificate expiration
  tests/avocado: mips: add hint for fetchasset plugin
  tests/avocado/intel_iommu.py: increase timeout
  tests/avocado: machine aarch64: standardize location and RO/RW access
  tests/avocado: use more distinct names for assets
  tests/avocado/kvm_xen_guest.py: cope with asset RW requirements
  tests/avocado: test_arm_emcraft_sf2: handle RW requirements for asset
  tests/avocado/boot_xen.py: merge base classes
  tests/avocado/boot_xen.py: unify tags
  tests/avocado/boot_xen.py: use class attribute

 tests/avocado/boot_linux_console.py  | 27 +++
 tests/avocado/boot_xen.py| 34 +---
 tests/avocado/intel_iommu.py |  2 ++
 tests/avocado/kvm_xen_guest.py   | 30 ++---
 tests/avocado/machine_aarch64_sbsaref.py |  9 +--
 tests/avocado/machine_aarch64_virt.py| 14 +-
 tests/avocado/netdev-ethtool.py  |  3 ++-
 7 files changed, 67 insertions(+), 52 deletions(-)

-- 
2.43.0




[PATCH 02/10] tests/avocado: mips: add hint for fetchasset plugin

2023-12-08 Thread Cleber Rosa
Avocado's fetchasset plugin runs before the actual Avocado job (and
any test).  It analyses the test's code looking for occurrences of
"self.fetch_asset()" in the either the actual test or setUp() method.
It's not able to fully analyze all code, though.

The way these tests are written makes the fetchasset plugin blind to
the assets.  This adds redundant code, true, but one that doesn't hurt
the test and aids the fetchasset plugin to download or verify the
existence of these assets in advance.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_linux_console.py | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/tests/avocado/boot_linux_console.py 
b/tests/avocado/boot_linux_console.py
index 8066861c17..f5c5d647a4 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -303,6 +303,11 @@ def test_mips_malta32el_nanomips_4k(self):
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page4k.xz')
 kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
+
+# The following line is a no-op that aids the avocado
+# fetchasset plugin that runs before any portion of the test
+self.fetch_asset(kernel_url, asset_hash=kernel_hash)
+
 self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
 
 def test_mips_malta32el_nanomips_16k_up(self):
@@ -316,6 +321,11 @@ def test_mips_malta32el_nanomips_16k_up(self):
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page16k_up.xz')
 kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
+
+# The following line is a no-op that aids the avocado
+# fetchasset plugin that runs before any portion of the test
+self.fetch_asset(kernel_url, asset_hash=kernel_hash)
+
 self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
 
 def test_mips_malta32el_nanomips_64k_dbg(self):
@@ -329,6 +339,11 @@ def test_mips_malta32el_nanomips_64k_dbg(self):
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page64k_dbg.xz')
 kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
+
+# The following line is a no-op that aids the avocado
+# fetchasset plugin that runs before any portion of the test
+self.fetch_asset(kernel_url, asset_hash=kernel_hash)
+
 self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
 
 def test_aarch64_xlnx_versal_virt(self):
-- 
2.43.0




[PATCH 06/10] tests/avocado/kvm_xen_guest.py: cope with asset RW requirements

2023-12-08 Thread Cleber Rosa
Some of these tests actually require the root filesystem image,
obtained through Avocado's asset feature and kept in a common cache
location, to be writable.

This makes a distinction between the tests that actually have this
requirement and those who don't.  The goal is to be as safe as
possible, avoiding causing cache misses (because the assets get
modified and thus need to be downloaded again) while avoiding copying the
root filesystem backing file whenever possible.

This also allow these tests to be run in parallel with newer Avocado
versions.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/kvm_xen_guest.py | 27 ++-
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py
index ec4052a1fe..d73fa888ef 100644
--- a/tests/avocado/kvm_xen_guest.py
+++ b/tests/avocado/kvm_xen_guest.py
@@ -10,6 +10,7 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 
 import os
+import shutil
 
 from qemu.machine import machine
 
@@ -43,7 +44,7 @@ def get_asset(self, name, sha1):
 return self.fetch_asset(name=f"qemu-kvm-xen-guest-{name}",
 locations=(url), asset_hash=sha1)
 
-def common_vm_setup(self):
+def common_vm_setup(self, readwrite=False):
 # We also catch lack of KVM_XEN support if we fail to launch
 self.require_accelerator("kvm")
 
@@ -56,11 +57,19 @@ def common_vm_setup(self):
   
"367962983d0d32109998a70b45dcee4672d0b045")
 self.rootfs = self.get_asset("rootfs.ext4",
  
"f1478401ea4b3fa2ea196396be44315bab2bb5e4")
+if readwrite:
+dest = os.path.join(self.workdir, os.path.basename(self.rootfs))
+shutil.copy(self.rootfs, dest)
+self.rootfs = dest
 
-def run_and_check(self):
+def run_and_check(self, readwrite=False):
+if readwrite:
+drive = f"file={self.rootfs},if=none,format=raw,id=drv0"
+else:
+drive = 
f"file={self.rootfs},if=none,readonly=on,format=raw,id=drv0"
 self.vm.add_args('-kernel', self.kernel_path,
  '-append', self.kernel_params,
- '-drive',  
f"file={self.rootfs},if=none,format=raw,id=drv0",
+ '-drive',  drive,
  '-device', 'xen-disk,drive=drv0,vdev=xvda',
  '-device', 'virtio-net-pci,netdev=unet',
  '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22')
@@ -90,11 +99,11 @@ def test_kvm_xen_guest(self):
 :avocado: tags=kvm_xen_guest
 """
 
-self.common_vm_setup()
+self.common_vm_setup(True)
 
 self.kernel_params = (self.KERNEL_DEFAULT +
   ' xen_emul_unplug=ide-disks')
-self.run_and_check()
+self.run_and_check(True)
 self.ssh_command('grep xen-pirq.*msi /proc/interrupts')
 
 def test_kvm_xen_guest_nomsi(self):
@@ -102,11 +111,11 @@ def test_kvm_xen_guest_nomsi(self):
 :avocado: tags=kvm_xen_guest_nomsi
 """
 
-self.common_vm_setup()
+self.common_vm_setup(True)
 
 self.kernel_params = (self.KERNEL_DEFAULT +
   ' xen_emul_unplug=ide-disks pci=nomsi')
-self.run_and_check()
+self.run_and_check(True)
 self.ssh_command('grep xen-pirq.* /proc/interrupts')
 
 def test_kvm_xen_guest_noapic_nomsi(self):
@@ -114,11 +123,11 @@ def test_kvm_xen_guest_noapic_nomsi(self):
 :avocado: tags=kvm_xen_guest_noapic_nomsi
 """
 
-self.common_vm_setup()
+self.common_vm_setup(True)
 
 self.kernel_params = (self.KERNEL_DEFAULT +
   ' xen_emul_unplug=ide-disks noapic pci=nomsi')
-self.run_and_check()
+self.run_and_check(True)
 self.ssh_command('grep xen-pirq /proc/interrupts')
 
 def test_kvm_xen_guest_vapic(self):
-- 
2.43.0




[PATCH 07/10] tests/avocado: test_arm_emcraft_sf2: handle RW requirements for asset

2023-12-08 Thread Cleber Rosa
The asset used in the mentioned test gets truncated before it's used
in the test.  This means that the file gets modified, and thus the
asset's expected hash doesn't match anymore.  This causes cache misses
and re-downloads every time the test is re-run.

Let's make a copy of the asset so that the one in the cache is
preserved and the cache sees a hit on re-runs.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_linux_console.py | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/avocado/boot_linux_console.py 
b/tests/avocado/boot_linux_console.py
index f5c5d647a4..e2e928e703 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -414,14 +414,16 @@ def test_arm_emcraft_sf2(self):
'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin')
 spi_hash = '65523a1835949b6f4553be96dec1b6a38fb05501'
 spi_path = self.fetch_asset(spi_url, asset_hash=spi_hash)
+spi_path_rw = os.path.join(self.workdir, os.path.basename(spi_path))
+shutil.copy(spi_path, spi_path_rw)
 
-file_truncate(spi_path, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB
+file_truncate(spi_path_rw, 16 << 20) # Spansion S25FL128SDPBHICO is 16 
MiB
 
 self.vm.set_console()
 kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
 self.vm.add_args('-kernel', uboot_path,
  '-append', kernel_command_line,
- '-drive', 'file=' + spi_path + ',if=mtd,format=raw',
+ '-drive', 'file=' + spi_path_rw + 
',if=mtd,format=raw',
  '-no-reboot')
 self.vm.launch()
 self.wait_for_console_pattern('Enter \'help\' for a list')
-- 
2.43.0




[PATCH 09/10] tests/avocado/boot_xen.py: unify tags

2023-12-08 Thread Cleber Rosa
Because all tests share the same tags, it's possible to have all of
them at the class level.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_xen.py | 26 +-
 1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/tests/avocado/boot_xen.py b/tests/avocado/boot_xen.py
index f80cbcb8fb..f4b63c1ef2 100644
--- a/tests/avocado/boot_xen.py
+++ b/tests/avocado/boot_xen.py
@@ -20,6 +20,11 @@
 class BootXen(LinuxKernelTest):
 """
 Boots a Xen hypervisor with a Linux DomU kernel.
+
+:avocado: tags=arch:aarch64
+:avocado: tags=accel:tcg
+:avocado: tags=cpu:cortex-a57
+:avocado: tags=machine:virt
 """
 
 timeout = 90
@@ -60,13 +65,6 @@ def launch_xen(self, xen_path):
 wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
 
 def test_arm64_xen_411_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
 xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
'download?path=%2F='
@@ -78,13 +76,6 @@ def test_arm64_xen_411_and_dom0(self):
 self.launch_xen(xen_path)
 
 def test_arm64_xen_414_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
 xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
'download?path=%2F='
@@ -96,13 +87,6 @@ def test_arm64_xen_414_and_dom0(self):
 self.launch_xen(xen_path)
 
 def test_arm64_xen_415_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 xen_url = ('https://fileserver.linaro.org/'
's/JSsewXGZ6mqxPr5/download'
'?path=%2F=xen-upstream-4.15-unstable.deb')
-- 
2.43.0




[PATCH 01/10] tests/avocado: mips: fallback to HTTP given certificate expiration

2023-12-08 Thread Cleber Rosa
The SSL certificate installed at mipsdistros.mips.com has expired:

 0 s:CN = mipsdistros.mips.com
 i:C = US, O = Amazon, OU = Server CA 1B, CN = Amazon
 a:PKEY: rsaEncryption, 2048 (bit); sigalg: RSA-SHA256
 v:NotBefore: Dec 23 00:00:00 2019 GMT; NotAfter: Jan 23 12:00:00 2021 GMT

Because this project has no control over that certificate and host,
this falls back to plain HTTP instead.  The integrity of the
downloaded files can be guaranteed by the existing hashes for those
files (which are not modified here).

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_linux_console.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/avocado/boot_linux_console.py 
b/tests/avocado/boot_linux_console.py
index 3f0180e1f8..8066861c17 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -299,7 +299,7 @@ def test_mips_malta32el_nanomips_4k(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page4k.xz')
 kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
@@ -312,7 +312,7 @@ def test_mips_malta32el_nanomips_16k_up(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page16k_up.xz')
 kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
@@ -325,7 +325,7 @@ def test_mips_malta32el_nanomips_64k_dbg(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page64k_dbg.xz')
 kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
-- 
2.43.0




[PATCH 10/10] tests/avocado/boot_xen.py: use class attribute

2023-12-08 Thread Cleber Rosa
Rather than defining a single use variable, let's just use the class
attribute directly.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_xen.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/avocado/boot_xen.py b/tests/avocado/boot_xen.py
index f4b63c1ef2..f29bc58b9e 100644
--- a/tests/avocado/boot_xen.py
+++ b/tests/avocado/boot_xen.py
@@ -50,11 +50,10 @@ def launch_xen(self, xen_path):
 
 self.vm.set_console()
 
-xen_command_line = self.XEN_COMMON_COMMAND_LINE
 self.vm.add_args('-machine', 'virtualization=on',
  '-m', '768',
  '-kernel', xen_path,
- '-append', xen_command_line,
+ '-append', self.XEN_COMMON_COMMAND_LINE,
  '-device',
  
'guest-loader,addr=0x4700,kernel=%s,bootargs=console=hvc0'
  % (kernel_path))
-- 
2.43.0




[PATCH 03/10] tests/avocado/intel_iommu.py: increase timeout

2023-12-08 Thread Cleber Rosa
Based on many runs, the average run time for these 4 tests is around
250 seconds, with 320 seconds being the ceiling.  In any way, the
default 120 seconds timeout is inappropriate in my experience.

Let's increase the timeout so these tests get a chance to completion.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/intel_iommu.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/avocado/intel_iommu.py b/tests/avocado/intel_iommu.py
index f04ee1cf9d..24bfad0756 100644
--- a/tests/avocado/intel_iommu.py
+++ b/tests/avocado/intel_iommu.py
@@ -25,6 +25,8 @@ class IntelIOMMU(LinuxTest):
 :avocado: tags=flaky
 """
 
+timeout = 360
+
 IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
 kernel_path = None
 initrd_path = None
-- 
2.43.0




[PATCH 08/10] tests/avocado/boot_xen.py: merge base classes

2023-12-08 Thread Cleber Rosa
While it's a good practice to have reusable base classes, in this
specific case there's no other user of the BootXenBase class.

By unifying the class used in this test, we can improve readability
and have the opportunity to add some future improvements in a clearer
fashion.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_xen.py | 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tests/avocado/boot_xen.py b/tests/avocado/boot_xen.py
index fc2faeedb5..f80cbcb8fb 100644
--- a/tests/avocado/boot_xen.py
+++ b/tests/avocado/boot_xen.py
@@ -17,7 +17,7 @@
 from boot_linux_console import LinuxKernelTest
 
 
-class BootXenBase(LinuxKernelTest):
+class BootXen(LinuxKernelTest):
 """
 Boots a Xen hypervisor with a Linux DomU kernel.
 """
@@ -59,9 +59,6 @@ def launch_xen(self, xen_path):
 console_pattern = 'VFS: Cannot open root device'
 wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
 
-
-class BootXen(BootXenBase):
-
 def test_arm64_xen_411_and_dom0(self):
 """
 :avocado: tags=arch:aarch64
-- 
2.43.0




[PATCH 04/10] tests/avocado: machine aarch64: standardize location and RO/RW access

2023-12-08 Thread Cleber Rosa
The tests under machine_aarch64_virt.py do not need read-write access
to the ISOs.  The ones under machine_aarch64_sbsaref.py, on the other
hand, will need read-write access, so let's give each test a unique
file.

And while at it, let's use a single code style and hash for the ISO
url.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/machine_aarch64_sbsaref.py |  9 +++--
 tests/avocado/machine_aarch64_virt.py| 14 +++---
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/tests/avocado/machine_aarch64_sbsaref.py 
b/tests/avocado/machine_aarch64_sbsaref.py
index 528c7d2934..6ae84d77ac 100644
--- a/tests/avocado/machine_aarch64_sbsaref.py
+++ b/tests/avocado/machine_aarch64_sbsaref.py
@@ -7,6 +7,7 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 
 import os
+import shutil
 
 from avocado import skipUnless
 from avocado.utils import archive
@@ -123,13 +124,15 @@ def boot_alpine_linux(self, cpu):
 
 iso_hash = 
"5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027"
 iso_path = self.fetch_asset(iso_url, algorithm="sha256", 
asset_hash=iso_hash)
+iso_path_rw = os.path.join(self.workdir, os.path.basename(iso_path))
+shutil.copy(iso_path, iso_path_rw)
 
 self.vm.set_console()
 self.vm.add_args(
 "-cpu",
 cpu,
 "-drive",
-f"file={iso_path},format=raw",
+f"file={iso_path_rw},format=raw",
 "-device",
 "virtio-rng-pci,rng=rng0",
 "-object",
@@ -170,13 +173,15 @@ def boot_openbsd73(self, cpu):
 
 img_hash = 
"7fc2c75401d6f01fbfa25f4953f72ad7d7c18650056d30755c44b9c129b707e5"
 img_path = self.fetch_asset(img_url, algorithm="sha256", 
asset_hash=img_hash)
+img_path_rw = os.path.join(self.workdir, os.path.basename(img_path))
+shutil.copy(img_path, img_path_rw)
 
 self.vm.set_console()
 self.vm.add_args(
 "-cpu",
 cpu,
 "-drive",
-f"file={img_path},format=raw",
+f"file={img_path_rw},format=raw",
 "-device",
 "virtio-rng-pci,rng=rng0",
 "-object",
diff --git a/tests/avocado/machine_aarch64_virt.py 
b/tests/avocado/machine_aarch64_virt.py
index a90dc6ff4b..093d68f837 100644
--- a/tests/avocado/machine_aarch64_virt.py
+++ b/tests/avocado/machine_aarch64_virt.py
@@ -37,13 +37,13 @@ def test_alpine_virt_tcg_gic_max(self):
 :avocado: tags=machine:virt
 :avocado: tags=accel:tcg
 """
-iso_url = ('https://dl-cdn.alpinelinux.org/'
-   'alpine/v3.17/releases/aarch64/'
-   'alpine-standard-3.17.2-aarch64.iso')
+iso_url = (
+"https://dl-cdn.alpinelinux.org/"
+"alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso"
+)
 
-# Alpine use sha256 so I recalculated this myself
-iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839'
-iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1)
+iso_hash = 
"5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027"
+iso_path = self.fetch_asset(iso_url, algorithm="sha256", 
asset_hash=iso_hash)
 
 self.vm.set_console()
 kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
@@ -60,7 +60,7 @@ def test_alpine_virt_tcg_gic_max(self):
 self.vm.add_args("-smp", "2", "-m", "1024")
 self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
'edk2-aarch64-code.fd'))
-self.vm.add_args("-drive", f"file={iso_path},format=raw")
+self.vm.add_args("-drive", f"file={iso_path},readonly=on,format=raw")
 self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
 self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
 
-- 
2.43.0




[PATCH 05/10] tests/avocado: use more distinct names for assets

2023-12-08 Thread Cleber Rosa
Avocado's asset system will deposit files in a cache organized either
by their original location (the URI) or by their names.  Because the
cache (and the "by_name" sub directory) is common across tests, it's a
good idea to make these names as distinct as possible.

This avoids name clashes, which would otherwise make future Avocado runs
attempt to redownload the assets with the same name, but from the different
locations they actually are from.  This causes cache misses, extra
downloads, and possibly canceled tests.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/kvm_xen_guest.py  | 3 ++-
 tests/avocado/netdev-ethtool.py | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py
index 5391283113..ec4052a1fe 100644
--- a/tests/avocado/kvm_xen_guest.py
+++ b/tests/avocado/kvm_xen_guest.py
@@ -40,7 +40,8 @@ def get_asset(self, name, sha1):
 url = base_url + name
 # use explicit name rather than failing to neatly parse the
 # URL into a unique one
-return self.fetch_asset(name=name, locations=(url), asset_hash=sha1)
+return self.fetch_asset(name=f"qemu-kvm-xen-guest-{name}",
+locations=(url), asset_hash=sha1)
 
 def common_vm_setup(self):
 # We also catch lack of KVM_XEN support if we fail to launch
diff --git a/tests/avocado/netdev-ethtool.py b/tests/avocado/netdev-ethtool.py
index 5f33288f81..462cf8de7d 100644
--- a/tests/avocado/netdev-ethtool.py
+++ b/tests/avocado/netdev-ethtool.py
@@ -27,7 +27,8 @@ def get_asset(self, name, sha1):
 url = base_url + name
 # use explicit name rather than failing to neatly parse the
 # URL into a unique one
-return self.fetch_asset(name=name, locations=(url), asset_hash=sha1)
+return self.fetch_asset(name=f"qemu-netdev-ethtool-{name}",
+locations=(url), asset_hash=sha1)
 
 def common_test_code(self, netdev, extra_args=None):
 
-- 
2.43.0




Re: avocado test failing INTERRUPTED for "Missing asset"

2023-07-27 Thread Cleber Rosa
On Wed, Jul 26, 2023 at 5:07 AM Thomas Huth  wrote:
>
> On 26/07/2023 09.33, Philippe Mathieu-Daudé wrote:
> > On 25/7/23 19:13, Peter Maydell wrote:
> >> Currently this CI job is failing:
> >>
> >> https://gitlab.com/qemu-project/qemu/-/jobs/4737819946
> >>
> >> because:
> >>
> >> (05/59)
> >> tests/avocado/boot_linux_console.py:BootLinuxConsole.test_arm_exynos4210_initrd:
> >> INTERRUPTED: Missing asset
> >> https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb\nRunner
> >> error occurred: Timeout reached\nOriginal status: CANCEL\n{'name':
> >> '05-tests/avocado/boot_linux_console... (90.67 s)
> >>
> >> Why is a "Missing asset" causing a timeout after 90 seconds,
> >> rather than being accounted as a "SKIP" ("missing requirements
> >> in the test environment" sounds like what we have here) ?
> >
> > Maybe something to report to upstream Avocado.
>
> We're back to using Avocado v88.1 in QEMU. We first need someone who can
> update to the latest Avocado release and take care of the remaining
> problems... This is *very* frustrating.
>
>   Thomas
>

Hi Thomas,

As you might remember from a couple of months ago, I was (and still
am) running the Avocado QEMU tests extensively with the latest Avocado
and attempting to cover all gaps before the 103.0 LTS release.

To make it clear, Avocado is currently on sprint #103[1], which will
release 103.0, a "Long Term Stability" release.  It may sound like a
marketing gimmick, but it's important for QEMU because we want to be
able to address all bugs that are eventually caught after its release
while still minimizing disruption.

The frustration you mentioned with the attempt to migrate from 88.1 to
101.0, and now back to 88.1 is the kind of disruption we want to
avoid.  With 103.0 LTS, like with 92.X LTS, we can and will release
minor releases with needed fixes.

So, this is also an invitation to everyone else running the
Avocado-based QEMU tests with either Avocado 102.0 or the latest
development version and report issues.

BTW, Avocado's GitHub has a "customer:QEMU"[2] label that can be added
to issues that will certainly increase its classification and
priority.

PS: 88.1 was a minor release to address an issue with the release
process itself, so there were no differences from 88.0 that mattered
to users, nor was there the possibility of a 88.2 release with more
fixes.

Thanks,
- Cleber.

[1] - https://github.com/avocado-framework/avocado/milestone/29
[2] - 
https://github.com/avocado-framework/avocado/issues?q=is%3Aopen+is%3Aissue+label%3Acustomer%3AQEMU+milestone%3A%22%23103+%28LTS+release+-+Sound+of+Freedom%29%22




Re: avocado test failing INTERRUPTED for "Missing asset"

2023-07-27 Thread Cleber Rosa
On Wed, Jul 26, 2023 at 3:34 AM Philippe Mathieu-Daudé
 wrote:
>
> On 25/7/23 19:13, Peter Maydell wrote:
> > Currently this CI job is failing:
> >
> > https://gitlab.com/qemu-project/qemu/-/jobs/4737819946
> >
> > because:
> >
> > (05/59) 
> > tests/avocado/boot_linux_console.py:BootLinuxConsole.test_arm_exynos4210_initrd:
> > INTERRUPTED: Missing asset
> > https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb\nRunner
> > error occurred: Timeout reached\nOriginal status: CANCEL\n{'name':
> > '05-tests/avocado/boot_linux_console... (90.67 s)
> >
> > Why is a "Missing asset" causing a timeout after 90 seconds,
> > rather than being accounted as a "SKIP" ("missing requirements
> > in the test environment" sounds like what we have here) ?
>
> Maybe something to report to upstream Avocado.
>
>

Hi Philippe,

Please check my response to Peter's first message on this thread.
It's a rather long answer, but I hope this behavior is understandable.

> That said, we want CI to be reproducible. If we fetch assets from
> unreliable sources, we can't be reproducible. If we are unable to
> provide a assets cache, we'll keep hitting this problem. If we can
> not find a way to have assets stored (requiring sysadmin time setting
> up some infra, possibly only for GitLab), then I'd consider stopping
> running tests depending on external assets on CI; otherwise at some
> point we'll get tired to waste time figuring out the same problem.
>

Right, in an ideal world, we could have a master list of all the
assets that every single job will need, and require an admin to make
sure each and every one of them is cached before running a job.  The
current approach with Avocado's pre-job "fetch asset" plugin is to do
as much as possible without having duplication of the assets URLs in
such an "asset master list" and in the test code.

Also, it will not abort a job if any of these assets fail to be
fetched.  It's a convenient choice that, on the other hand, yields
lower reproducibility and reliability.

> As a maintainer I'm happy to run the avocado tests using my local
> assets cache, and I would rather keep using the framework. But then
> my cache is likely different from others (users, maintainers, CI).
> Similarly few users/maintainers end up having the same cache and
> running the same set of tests.
>
> $ du -chs ~/avocado/data/cache/
> 5.7G/Users/philmd/avocado/data/cache/
>
> Some files are older than 3 years, and I'm happy to still run the
> tests depending on them (although they disappeared from their
> original http server).
>

This is a well maintained cache! :) Can we rsync from it? :)

Jokes aside, I'm open for ideas on how to better balance this
convenience versus reliability question.

Thanks,
- Cleber.




Re: avocado test failing INTERRUPTED for "Missing asset"

2023-07-27 Thread Cleber Rosa
On Thu, Jul 27, 2023 at 11:50 AM Peter Maydell  wrote:
>
> Ah, so the problem is that we are trying to download the asset
> file, and the remote server is stalling so it doesn't actually
> download the file in 90s, and Avocado doesn't distinguish
> "hit the timeout while trying to download assets" from
> "hit the timeout running the actual test" ?
>

Yes, exactly.  Once the test starts, that's the only timeout being
enforced.  The fetch_asset() (and all the download code path) is
simply part of the test and thus under the test timeout.  Also, right
now, avocado.Test.fetch_asset() doesn't provide a timeout parameter
(but the underlying avocado.utils.asset.Asset.fetch() does).

> This sounds to me like the ideal would be that there is a separate
> timeout for file downloads (which could then be a lot shorter than
> the overall test timeout), and "timeout during asset download"
> would be detected separately from "timeout while actually running
> test".  But maybe the separation-of-phases in newer Avocado achieves
> that already ?
>

The mechanism in newer Avocado will simply never attempt to run tests
that don't have the stated requirements fulfilled.  With regards to
timeouts, each of the different kinds of requirement implementations
(file downloads and cache, A.K.A. "assets", packages installation,
ansible module execution,  etc) are supposed to provide their own
features, including timeouts.

Anyways, I'll look into, and report back on:

1. expanding avocado.Test.fetch_asset() with a timeout parameter
2. making sure the newer implementation for the requirement types used
by QEMU respect a timeout (they don't need to be smaller than the
test, because they run completely outside of the test).

For now, are you OK with re-running those jobs if the servers stall
the transfers? Or would you rather see a patch that changes the
find_only parameter to True, so that if the pre-test attempt to
download the asset fails, the transfer is never attempted during the
test?

Thanks,
- Cleber.

> thanks
> -- PMM
>




Re: avocado test failing INTERRUPTED for "Missing asset"

2023-07-27 Thread Cleber Rosa
On Tue, Jul 25, 2023 at 1:13 PM Peter Maydell  wrote:
>
> Currently this CI job is failing:
>
> https://gitlab.com/qemu-project/qemu/-/jobs/4737819946
>
> because:
>
> (05/59) 
> tests/avocado/boot_linux_console.py:BootLinuxConsole.test_arm_exynos4210_initrd:
> INTERRUPTED: Missing asset
> https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb\nRunner
> error occurred: Timeout reached\nOriginal status: CANCEL\n{'name':
> '05-tests/avocado/boot_linux_console... (90.67 s)
>
> Why is a "Missing asset" causing a timeout after 90 seconds,
> rather than being accounted as a "SKIP" ("missing requirements
> in the test environment" sounds like what we have here) ?
>

Hi Peter,

First of all, I am sorry for the frustration you experienced while
trying to understand all of this.  It took me a while too. Anyway,
the 90 seconds timeout is set here[1].

> I don't understand the debug.log, because it says all of
>  * that it retrieved the URL

That happens here[2], because Python's urllib.request.urlopen()[3]
returned a response.  The message is clearly misleading, though,
because:

1. the response may not indicate that the request was successful (even
though the most common exception raised by unsuccessful operations,
HTTPError,  is being handled)
2. it does not mean that the file's content pointed at by the URL has been transferred.

I've opened an issue[4] to fix this misleading message.

>  * that it wanted to cancel the test

Yes, this is the default behavior set here[5] (cancel_on_missing=True).

>  * that the test timed out
>

My understanding, up to this point, is that:

Avocado signals to the test that it should be interrupted.  The
handling of the exception gets to the attempt to fetch the asset,
which fails and is thus considered missing.  Because of
cancel_on_missing=True, It sets the status of the test, up to that
point, to be CANCELed.

But, Avocado "knows better", because it triggered the interruption of
the test, so it overwrites that status of the test as INTERRUPTed.
The whole thing may be complex and confusing, but IMO it seems
coherent so far (minus the misleading "retrieved" message).

Your question (" Why is a "Missing asset" causing a timeout after 90
seconds, rather than being accounted as a "SKIP" ("missing
requirements in the test environment" sounds like what we have here)
?) is actually very important and up to the point.  For Avocado 88.1,
there are two things happening when it comes to requirements:

1. The "assets" plugin runs *before* the job, and attempts to identify
and cache all assets (best effort, and sometimes limited approach,
because it employs a static syntactic analysis of the source code to
identify assets to be retrieved.  This *usually* handles the
requirements before the tests.
2. The actual execution of the "fetch_asset()" code during the test
execution (including during setUp()).  This is *not* limited by the
static syntactic analysis mentioned earlier.

Ideally, step 2 would *not* happen, as it can violate the
"requirements before test" principle.  One way to achieve that is
simply to set find_only=True at the same place cancel_on_missing
defaults to True[5].  It's documented here[6].

For newer Avocado, there's a completely different requirement
mechanism[7], that is completely outside of the test execution.  I
hope that it will be leveraged by QEMU (it was designed to improve the
current design/implementation limitations mentioned previously).  But,
this is a bit out of the scope at this point, because the goal is to
have Avocado 103.0 addressing logging and properly cleaning up all
tests (like stray processes) which are the two high priority issues we
are tracking with the "customer:QEMU" label before proposing a bump in
Avocado version.

> Here it is:
>
> 16:03:16 DEBUG| PARAMS (key=arch, path=*, default=arm) => 'arm'
> 16:03:16 DEBUG| PARAMS (key=cpu, path=*, default=None) => None
> 16:03:16 DEBUG| PARAMS (key=qemu_bin, path=*,
> default=./qemu-system-arm) => './qemu-system-arm'
> 16:03:16 DEBUG| PARAMS (key=machine, path=*, default=smdkc210) => 'smdkc210'
> 16:03:16 INFO | Asset not in cache, fetching it.
> 16:03:16 INFO | Fetching
> https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb
> -> 
> /builds/qemu-project/qemu/avocado-cache/by_location/5f20376efeb69c8898caaff3edf7de45b4540163/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb.ooffovd_
> 16:04:05 DEBUG| Retrieved URL
> "https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb":
> content-length 33882084, date: "Tue, 25 Jul 2023 16:03:16 GMT",
> last-modified: "Tue, 24 Sep 2019 22:31:23 GMT"
> 16:04:46 ERROR| RuntimeError: Test interrupted by SIGTERM
> 16:04:46 ERROR|
> 16:04:46 ERROR| Reproduced traceback from:
> 

Re: [PATCH v4 1/6] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 4/18/23 03:10, Thomas Huth wrote:

On 17/04/2023 15.43, Alex Bennée wrote:

From: Kautuk Consul 

Avocado version 101.0 has a fix to re-compute the checksum
of an asset file if the algorithm used in the *-CHECKSUM
file isn't the same as the one being passed to it by the
avocado user (i.e. the avocado_qemu python module).
In the earlier avocado versions this fix wasn't there due
to which if the checksum wouldn't match the earlier
checksum (calculated by a different algorithm), the avocado
code would start downloading a fresh image from the internet
URL thus making the test-cases take longer to execute.

Bump up the avocado-framework version to 101.0.

Signed-off-by: Kautuk Consul 
Tested-by: Hariharan T S 
Message-Id: <20230327115030.3418323-2-kcon...@linux.vnet.ibm.com>
Message-Id: <20230330101141.30199-10-alex.ben...@linaro.org>

---
v2
   - limit --max-parallel-tasks $(JOBS_OPTION:-j%=%)
---
  tests/Makefile.include | 18 +++---
  tests/requirements.txt |  2 +-
  2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 9422ddaece..a4de0ad5a2 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -138,14 +138,18 @@ get-vm-image-fedora-31-%: check-venv
  # download all vm images, according to defined targets
  get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, 
$(FEDORA_31_DOWNLOAD))
  +JOBS_OPTION=$(lastword -j1 $(filter-out -j, $(filter 
-j%,$(MAKEFLAGS

+
  check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
-    $(call quiet-command, \
-    $(TESTS_PYTHON) -m avocado \
-    --show=$(AVOCADO_SHOW) run 
--job-results-dir=$(TESTS_RESULTS_DIR) \

-    $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
-    --filter-by-tags-include-empty-key) \
-    $(AVOCADO_CMDLINE_TAGS) \
-    $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
+    $(call quiet-command, \
+    $(TESTS_PYTHON) -m avocado \
+    --show=$(AVOCADO_SHOW) run 
--job-results-dir=$(TESTS_RESULTS_DIR) \

+    $(if $(AVOCADO_TAGS),, \
+    --filter-by-tags-include-empty \
+    --filter-by-tags-include-empty-key) \
+    --max-parallel-tasks $(JOBS_OPTION:-j%=%) \
+    $(AVOCADO_CMDLINE_TAGS) \
+    $(if $(GITLAB_CI),,--failfast) 
$(AVOCADO_TESTS), \


We might need to revisit this --failfast logic, too. If I've got that 
right, failfast is now the default with the new system? So we might 
want to disable it by default again if GITLAB_CI is not set?


 Thomas


Hi Thomas,

I must be missing something, because under Avocado 101.0, I'm getting 
the following behavior without the --failfast flag:


  $ avocado run --max-parallel-tasks=1 -- /bin/true /bin/false /bin/true
  JOB ID : 646f476f01b8d5599a57530606de543f2d9a5366
  JOB LOG    : 
/root/avocado/job-results/job-2023-04-21T04.15-646f476/job.log

   (1/3) /bin/true: STARTED
   (1/3) /bin/true: PASS (0.01 s)
   (2/3) /bin/false: STARTED
   (2/3) /bin/false: FAIL (0.01 s)
   (3/3) /bin/true: STARTED
   (3/3) /bin/true: PASS (0.01 s)
  RESULTS    : PASS 2 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 
0 | CANCEL 0

  JOB TIME   : 1.01 s

  Test summary:
  /bin/false: FAIL

And this with --failfast:

  $ avocado run --failfast --max-parallel-tasks=1 -- /bin/true 
/bin/false /bin/true

  JOB ID : ae4894607a42194a7382efa545eccaccf7495fa3
  JOB LOG    : 
/root/avocado/job-results/job-2023-04-21T04.17-ae48946/job.log

   (1/3) /bin/true: STARTED
   (1/3) /bin/true: PASS (0.01 s)
   (2/3) /bin/false: STARTED
   (2/3) /bin/false: FAIL (0.01 s)
  Interrupting job (failfast).
  RESULTS    : PASS 1 | ERROR 0 | FAIL 1 | SKIP 1 | WARN 0 | INTERRUPT 
0 | CANCEL 0

  JOB TIME   : 0.75 s

  Test summary:
  /bin/false: FAIL

Maybe it's something in the Makefile I'm missing, or something specific 
to these tests, but that would be a long shot.


Thanks,

- Cleber.




Re: [PATCH v4 1/6] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 4/20/23 12:54, John Snow wrote:



On Thu, Apr 20, 2023, 5:19 AM Philippe Mathieu-Daudé 
 wrote:


On 17/4/23 19:44, Alex Bennée wrote:
>
> John Snow  writes:
>
>> On Mon, Apr 17, 2023 at 9:43 AM Alex Bennée
 wrote:
>>>
>>> From: Kautuk Consul 
>>>
>>> Avocado version 101.0 has a fix to re-compute the checksum
>>> of an asset file if the algorithm used in the *-CHECKSUM
>>> file isn't the same as the one being passed to it by the
>>> avocado user (i.e. the avocado_qemu python module).
>>> In the earlier avocado versions this fix wasn't there due
>>> to which if the checksum wouldn't match the earlier
>>> checksum (calculated by a different algorithm), the avocado
>>> code would start downloading a fresh image from the internet
>>> URL thus making the test-cases take longer to execute.
>>>
>>> Bump up the avocado-framework version to 101.0.
>>>
>>> Signed-off-by: Kautuk Consul 
>>> Tested-by: Hariharan T S 
>>> Message-Id: <20230327115030.3418323-2-kcon...@linux.vnet.ibm.com>
>>> Message-Id: <20230330101141.30199-10-alex.ben...@linaro.org>
>>>
>>> ---
>>> v2
>>>    - limit --max-parallel-tasks $(JOBS_OPTION:-j%=%)
>>> ---
>>>   tests/Makefile.include | 18 +++---
>>>   tests/requirements.txt |  2 +-
>>>   2 files changed, 12 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/tests/Makefile.include b/tests/Makefile.include
>>> index 9422ddaece..a4de0ad5a2 100644
>>> --- a/tests/Makefile.include
>>> +++ b/tests/Makefile.include
>>> @@ -138,14 +138,18 @@ get-vm-image-fedora-31-%: check-venv
>>>   # download all vm images, according to defined targets
>>>   get-vm-images: check-venv $(patsubst
%,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD))
>>>
>>> +JOBS_OPTION=$(lastword -j1 $(filter-out -j, $(filter
-j%,$(MAKEFLAGS
>>> +
>>>   check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
>>> -       $(call quiet-command, \
>>> -            $(TESTS_PYTHON) -m avocado \
>>> -            --show=$(AVOCADO_SHOW) run
--job-results-dir=$(TESTS_RESULTS_DIR) \
>>> -            $(if $(AVOCADO_TAGS),,
--filter-by-tags-include-empty \
>>> -  --filter-by-tags-include-empty-key) \
>>> -            $(AVOCADO_CMDLINE_TAGS) \
>>> -            $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
>>> +       $(call quiet-command,                                    \
>>> +            $(TESTS_PYTHON) -m avocado                      
                     \
>>> +            --show=$(AVOCADO_SHOW) run
--job-results-dir=$(TESTS_RESULTS_DIR)  \
>>> +            $(if $(AVOCADO_TAGS),,                          
         \
>>> +  --filter-by-tags-include-empty                          \
>>> +  --filter-by-tags-include-empty-key)                     \
>>> +               --max-parallel-tasks $(JOBS_OPTION:-j%=%)     
                 \
>>> +            $(AVOCADO_CMDLINE_TAGS)                          
        \
>>> +            $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), 
                 \
>>>               "AVOCADO", "tests/avocado")
>>>
>>>   check-acceptance-deprecated-warning:
>>> diff --git a/tests/requirements.txt b/tests/requirements.txt
>>> index 0ba561b6bd..a6f73da681 100644
>>> --- a/tests/requirements.txt
>>> +++ b/tests/requirements.txt
>>> @@ -2,5 +2,5 @@
>>>   # in the tests/venv Python virtual environment. For more info,
>>>   # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
>>>   # Note that qemu.git/python/ is always implicitly installed.
>>> -avocado-framework==88.1
>>> +avocado-framework==101.0
>>>   pycdlib==1.11.0
>>> --
>>> 2.39.2
>>>
>>
>> I thought there were test failures that prohibited us from
bumping the
>> Avocado-Framework version. Did those get rectified recently?

No, still not working on Darwin. At this point I'm thinking at sending
a patch clarifying testing QEMU with Avocado is not supported on
Darwin.


What's broken on Darwin?


Hi John,

IIRC, the main issue is that the legacy runner (the one active in 88.1) 
pickles some stuff that does not play nice under Darwin.  It has never (to 
the best of my knowledge) worked under Darwin.



Is it broken with avocado-framework==88.1 too? (Is this a regression?)

It was already broken with avocado-framework==88.1, but it should mostly 
work with 101.0 (see previous reply).



We might need to consider entirely separate issues:

(1) We need to upgrade avocado-framework to a supported LTS version >= 
v90 for dependency harmony with qemu.git/python testing


(2) We need to upgrade avocado-framework to >= v101.0 to fix the 
stated checksum issue in this patch


(3) We would like avocado tests to work on Darwin. (Have they ever 
worked? When did they break? 

Re: [PATCH v4 1/6] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 4/20/23 05:18, Philippe Mathieu-Daudé wrote:

On 17/4/23 19:44, Alex Bennée wrote:


John Snow  writes:

On Mon, Apr 17, 2023 at 9:43 AM Alex Bennée  
wrote:


From: Kautuk Consul 

Avocado version 101.0 has a fix to re-compute the checksum
of an asset file if the algorithm used in the *-CHECKSUM
file isn't the same as the one being passed to it by the
avocado user (i.e. the avocado_qemu python module).
In the earlier avocado versions this fix wasn't there due
to which if the checksum wouldn't match the earlier
checksum (calculated by a different algorithm), the avocado
code would start downloading a fresh image from the internet
URL thus making the test-cases take longer to execute.

Bump up the avocado-framework version to 101.0.

Signed-off-by: Kautuk Consul 
Tested-by: Hariharan T S 
Message-Id: <20230327115030.3418323-2-kcon...@linux.vnet.ibm.com>
Message-Id: <20230330101141.30199-10-alex.ben...@linaro.org>

---
v2
   - limit --max-parallel-tasks $(JOBS_OPTION:-j%=%)
---
  tests/Makefile.include | 18 +++---
  tests/requirements.txt |  2 +-
  2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 9422ddaece..a4de0ad5a2 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -138,14 +138,18 @@ get-vm-image-fedora-31-%: check-venv
  # download all vm images, according to defined targets
  get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, 
$(FEDORA_31_DOWNLOAD))


+JOBS_OPTION=$(lastword -j1 $(filter-out -j, $(filter 
-j%,$(MAKEFLAGS

+
  check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
-   $(call quiet-command, \
-    $(TESTS_PYTHON) -m avocado \
-    --show=$(AVOCADO_SHOW) run 
--job-results-dir=$(TESTS_RESULTS_DIR) \

-    $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
-   --filter-by-tags-include-empty-key) \
-    $(AVOCADO_CMDLINE_TAGS) \
-    $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
+   $(call quiet-command, \
+    $(TESTS_PYTHON) -m 
avocado \
+    --show=$(AVOCADO_SHOW) run 
--job-results-dir=$(TESTS_RESULTS_DIR)  \

+    $(if $(AVOCADO_TAGS),, \
+ --filter-by-tags-include-empty  \
+ --filter-by-tags-include-empty-key) \
+   --max-parallel-tasks 
$(JOBS_OPTION:-j%=%)   \

+ $(AVOCADO_CMDLINE_TAGS) \
+    $(if $(GITLAB_CI),,--failfast) 
$(AVOCADO_TESTS),   \

  "AVOCADO", "tests/avocado")

  check-acceptance-deprecated-warning:
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 0ba561b6bd..a6f73da681 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -2,5 +2,5 @@
  # in the tests/venv Python virtual environment. For more info,
  # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
  # Note that qemu.git/python/ is always implicitly installed.
-avocado-framework==88.1
+avocado-framework==101.0
  pycdlib==1.11.0
--
2.39.2



I thought there were test failures that prohibited us from bumping the
Avocado-Framework version. Did those get rectified recently?


No, still not working on Darwin. At this point I'm thinking at sending
a patch clarifying testing QEMU with Avocado is not supported on Darwin.


Hi Phil,

Do you mean with or without this version bump?

Even though it was somewhat recently[1] that another level of Darwin 
compatibility was added to Avocado, where a good part of Avocado's 
selftests[2] were enabled in OS X, Avocado 101.0 should not be that far 
behind.


I honestly recall people testing and reporting successful execution of 
the Avocado tests under Darwin with the latest Avocado. Hopefully my memory 
is not playing tricks on me, but I'll attempt to verify that.


Cheers,

- Cleber.

[1] - https://github.com/avocado-framework/avocado/pull/5622

[2] - 
https://github.com/avocado-framework/avocado/actions/runs/4417285317/jobs/7742772932?pr=5622#step:10:11




I'm not seeing any with your patch applied.


Are you running the Cirrus-CI jobs?






Re: [PATCH v4 1/6] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 4/17/23 12:50, John Snow wrote:

On Mon, Apr 17, 2023 at 9:43 AM Alex Bennée  wrote:

From: Kautuk Consul 

Avocado version 101.0 has a fix to re-compute the checksum
of an asset file if the algorithm used in the *-CHECKSUM
file isn't the same as the one being passed to it by the
avocado user (i.e. the avocado_qemu python module).
In the earlier avocado versions this fix wasn't there due
to which if the checksum wouldn't match the earlier
checksum (calculated by a different algorithm), the avocado
code would start downloading a fresh image from the internet
URL thus making the test-cases take longer to execute.

Bump up the avocado-framework version to 101.0.

Signed-off-by: Kautuk Consul 
Tested-by: Hariharan T S 
Message-Id: <20230327115030.3418323-2-kcon...@linux.vnet.ibm.com>
Message-Id: <20230330101141.30199-10-alex.ben...@linaro.org>

---
v2
   - limit --max-parallel-tasks $(JOBS_OPTION:-j%=%)
---
  tests/Makefile.include | 18 +++---
  tests/requirements.txt |  2 +-
  2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 9422ddaece..a4de0ad5a2 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -138,14 +138,18 @@ get-vm-image-fedora-31-%: check-venv
  # download all vm images, according to defined targets
  get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, 
$(FEDORA_31_DOWNLOAD))

+JOBS_OPTION=$(lastword -j1 $(filter-out -j, $(filter -j%,$(MAKEFLAGS
+
  check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
-   $(call quiet-command, \
-$(TESTS_PYTHON) -m avocado \
---show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
-$(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
-   --filter-by-tags-include-empty-key) \
-$(AVOCADO_CMDLINE_TAGS) \
-$(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
+   $(call quiet-command,   
\
+$(TESTS_PYTHON) -m avocado 
\
+--show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR)  
\
+$(if $(AVOCADO_TAGS),, 
\
+   --filter-by-tags-include-empty  
\
+   --filter-by-tags-include-empty-key) 
\
+   --max-parallel-tasks $(JOBS_OPTION:-j%=%)   
\
+$(AVOCADO_CMDLINE_TAGS)
\
+$(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS),   
\
  "AVOCADO", "tests/avocado")

  check-acceptance-deprecated-warning:
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 0ba561b6bd..a6f73da681 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -2,5 +2,5 @@
  # in the tests/venv Python virtual environment. For more info,
  # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
  # Note that qemu.git/python/ is always implicitly installed.
-avocado-framework==88.1
+avocado-framework==101.0
  pycdlib==1.11.0
--
2.39.2


I thought there were test failures that prohibited us from bumping the
Avocado-Framework version. Did those get rectified recently?


Hi John,

While testing before and after behavior of pretty much every test in 
tests/avocado/,  I've found that some tests may get affected by the 
extra isolation (and overhead) of the new runner in recent Avocado and 
may get interrupted more often than on the previous runner.  But this is 
highly dependent on the amount of resources the machine you run the 
tests on has, and how tight the current timeout is.


Running all the tests under old and new Avocado also revealed some tests 
that are equally broken under both versions.  Thomas has bisected some, 
and Cedric has come up with some patches too.


To avoid being too verbose by default, let me know if you want the 
full details.  Also, see my previous reply about the two issues I'm 
aware of that would make this bump as seamless as possible.


Thanks,

- Cleber.




Re: [PATCH v4 1/6] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 4/17/23 09:43, Alex Bennée wrote:

From: Kautuk Consul 

Avocado version 101.0 has a fix to re-compute the checksum
of an asset file if the algorithm used in the *-CHECKSUM
file isn't the same as the one being passed to it by the
avocado user (i.e. the avocado_qemu python module).
In the earlier avocado versions this fix wasn't there due
to which if the checksum wouldn't match the earlier
checksum (calculated by a different algorithm), the avocado
code would start downloading a fresh image from the internet
URL thus making the test-cases take longer to execute.

Bump up the avocado-framework version to 101.0.

Signed-off-by: Kautuk Consul 
Tested-by: Hariharan T S 
Message-Id: <20230327115030.3418323-2-kcon...@linux.vnet.ibm.com>
Message-Id: <20230330101141.30199-10-alex.ben...@linaro.org>

---
v2
   - limit --max-parallel-tasks $(JOBS_OPTION:-j%=%)
---
  tests/Makefile.include | 18 +++---
  tests/requirements.txt |  2 +-
  2 files changed, 12 insertions(+), 8 deletions(-)


Hi everyone,

Looks like I've mistakenly replied to the earlier thread, so here's 
pretty much the same message on a better location.


First of all, thanks to Kautuk for sending this and thanks to Hariharan 
for further testing it.


I'd like to give some context which not everyone may be aware of. 
Avocado 101.0 is a very different release when compared with 88.1. Everything 
related to the execution of tests is brand new.  To be more precise, on 
version 91.0[1], this new runner[2] became the default. On version 97.0, 
the old runner implementation (currently in use in QEMU) was finally 
removed.


On most releases since then, I've been running the QEMU tests with the 
latest Avocado, and finding issues that are (as resources allow) 
addressed in later versions.   As you probably noticed, Avocado 101.0 
runs the QEMU tests without much (or any) visible issues for most 
people.  But, I'm aware of two pending issues that may or may not be a 
big deal to users:


I) The logging behavior is a bit different since Avocado 88.1. At a 
given point it was considered that Avocado should not mess around 
inadvertently with Python's root logger, and should be more picky about 
what it includes in logs.  For most cases, a simple workaround[4] does the 
trick.  But, for some other use cases (say for 3rd party libraries' logs 
you want logged alongside Avocado's logs) there's a pending PR[5] that 
will take care of all known limitations.


II) The support for killing tests (internally in Avocado represented as 
more generic "tasks") and all its children is a bit lacking.  This is an 
issue I'm actively working on[6].  This may leave some processes (such 
as "qemu-system-*") running even after a test was interrupted.


Fixes for both of these issues are due to be in version 102.0. The ETA 
for version 102.0 is 1-2 weeks.


With that being said, I'm more than OK with this patch (alongside PATCH 
2, without which havoc ensues :) provided people understand the two 
pending issues above.  If this patch is taken before Avocado 102.0 is 
released, the delta from 101.0 would be much smaller, so it should be an 
easier change to test.


Cheers,

- Cleber.


[1] - https://avocado-framework.readthedocs.io/en/101.0/releases/91_0.html

[2] - The new runner is called "nrunner" and I am to be blamed for the 
naming lacking any originality


[3] - 
https://avocado-framework.readthedocs.io/en/101.0/releases/97_0.html#users-test-writers


[4] - 
https://gitlab.com/cleber.gnu/qemu/-/commit/a9f39c4f6671b756196a185c7275eb7ebd13e588


[5] - https://github.com/avocado-framework/avocado/pull/5645

[6] - https://github.com/avocado-framework/avocado/issues/4994





Re: [PATCH 1/2] tests/requirements.txt: bump up avocado-framework version to 101.0

2023-04-20 Thread Cleber Rosa



On 3/27/23 07:50, Kautuk Consul wrote:

Avocado version 101.0 has a fix to re-compute the checksum
of an asset file if the algorithm used in the *-CHECKSUM
file isn't the same as the one being passed to it by the
avocado user (i.e. the avocado_qemu python module).
In the earlier avocado versions this fix wasn't there due
to which if the checksum wouldn't match the earlier
checksum (calculated by a different algorithm), the avocado
code would start downloading a fresh image from the internet
URL thus making the test-cases take longer to execute.

Bump up the avocado-framework version to 101.0.


Hi Kautuk,

First of all, thanks for working on this, and thanks to Hariharan for 
testing it.


I'd like to give some context which not everyone may be aware of.  
Avocado 101.0 is a very different release when compared with 88.1. Everything 
related to the execution of tests is brand new.  To be more precise, on 
version 91.0[1], this new runner[2] became the default. On version 97.0, 
the old runner implementation (currently in use in QEMU) was finally 
removed.


On most releases since then, I've been running the QEMU tests with the 
latest Avocado, and finding issues that are (as resources allow) 
addressed in later versions.   As you probably noticed, Avocado 101.0 
runs the QEMU tests without much (or any) visible issues for most 
people.  But, I'm aware of two pending issues that may or may not be a 
big deal to users:


I) The logging behavior is a bit different since Avocado 88.1. At a 
given point it was considered that Avocado should not mess around 
inadvertently with Python's root logger, and should be more picky about 
what it includes in logs.  For most cases, a simple workaround[4] does the 
trick.  But, for some other use cases (say for 3rd party libraries' logs 
you want logged alongside Avocado's logs) there's a pending PR[5] that 
will take care of all known limitations.


II) The support for killing tests (internally in Avocado represented as 
more generic "tasks") and all its children is a bit lacking.  This is an 
issue I'm actively working on[6].  This may leave some processes (such 
as "qemu-system-*") running even after a test was interrupted.


Fixes for both of these issues are due to be in version 102.0. The ETA 
for version 102.0 is 1-2 weeks.


With that being said, I'm more than OK with this patch (alongside PATCH 
2, without which havoc ensues :) provided people understand the two 
pending issues above.  If this patch is taken before Avocado 102.0 is 
released, the delta from 101.0 would be much smaller, so it should be an 
easier change to test.


Cheers,

- Cleber.


[1] - https://avocado-framework.readthedocs.io/en/101.0/releases/91_0.html

[2] - The new runner is called "nrunner" and I am to be blamed for the 
naming lacking any originality


[3] - 
https://avocado-framework.readthedocs.io/en/101.0/releases/97_0.html#users-test-writers


[4] - 
https://gitlab.com/cleber.gnu/qemu/-/commit/a9f39c4f6671b756196a185c7275eb7ebd13e588


[5] - https://github.com/avocado-framework/avocado/pull/5645

[6] - https://github.com/avocado-framework/avocado/issues/4994





Re: [PATCH 3/4] python/qmp-shell: relicense as LGPLv2+

2022-03-30 Thread Cleber Rosa


John Snow  writes:

> qmp-shell is presently licensed as GPLv2 (only). I intend to include
> this tool as an add-on to an LGPLv2+ library package hosted on
> PyPI.org. I've selected LGPLv2+ to maximize compatibility with other
> licenses while retaining a copyleft license.
>
> To keep licensing matters simple, I'd like to relicense this tool as
> LGPLv2+ as well in order to keep the resultant license of the hosted
> release files simple -- even if library users won't "link against" this
> command line tool.
>
> Therefore, I am asking permission from the current authors of this
> tool to loosen the license. At present, those people are:
>
> - John Snow (me!), 411/609
> - Luiz Capitulino, Author, 97/609
> - Daniel Berrangé, 81/609
> - Eduardo Habkost, 10/609
> - Marc-André Lureau, 6/609
> - Fam Zheng, 3/609
> - Cleber Rosa, 1/609
>
> (All of which appear to have been written under redhat.com addresses.)
>
> Eduardo's fixes are largely automated from 2to3 conversion tools and may
> not necessarily constitute authorship, but his signature would put to
> rest any questions.
>
> Cleber's changes concern a single import statement change. Also won't
> hurt to ask.
>
> CC: Luiz Capitulino 
> CC: Daniel Berrange 
> CC: Eduardo Habkost 
> CC: Marc-André Lureau 
> CC: Fam Zheng 
> CC: Cleber Rosa 
>
> Signed-off-by: John Snow 
> ---
>  python/qemu/aqmp/qmp_shell.py | 7 ---
>  1 file changed, 4 insertions(+), 3 deletions(-)
>

Acked-by: Cleber Rosa 




Re: [PATCH v2] tests/avocado: starts PhoneServer upfront

2022-03-16 Thread Cleber Rosa


Beraldo Leal  writes:

> Race conditions can happen with the current code, because the port that
> was available might not be anymore by the time the server is started.
>
> By setting the port to 0, PhoneServer it will use the OS default
> behavior to get a free port, then we save this information so we can
> later configure the guest.
>
> Suggested-by: Daniel P. Berrangé 
> Signed-off-by: Beraldo Leal 
> ---
>  tests/avocado/avocado_qemu/__init__.py | 13 +++--
>  1 file changed, 7 insertions(+), 6 deletions(-)

Reviewed-by: Cleber Rosa 
Tested-by: Cleber Rosa 




Re: [PATCH] tests/avocado: starts PhoneServer upfront

2022-03-11 Thread Cleber Rosa


Beraldo Leal  writes:

> On Fri, Mar 11, 2022 at 09:28:24AM -0500, Cleber Rosa wrote:
>> 
>> Beraldo Leal  writes:
>> 
>> > Race conditions can happen with the current code, because the port that
>> > was available might not be anymore by the time the server is started.
>> >
>> > By setting the port to 0, PhoneServer it will use the OS default
>> > behavior to get a free port, then we save this information so we can
>> > later configure the guest.
>> >
>> > Suggested-by: Daniel P. Berrangé 
>> > Signed-off-by: Beraldo Leal 
>> > ---
>> >  tests/avocado/avocado_qemu/__init__.py | 13 -
>> >  1 file changed, 8 insertions(+), 5 deletions(-)
>> >
>> > diff --git a/tests/avocado/avocado_qemu/__init__.py 
>> > b/tests/avocado/avocado_qemu/__init__.py
>> > index 9b056b5ce5..e830d04b84 100644
>> > --- a/tests/avocado/avocado_qemu/__init__.py
>> > +++ b/tests/avocado/avocado_qemu/__init__.py
>> > @@ -602,9 +602,8 @@ def prepare_cloudinit(self, ssh_pubkey=None):
>> >  self.log.info('Preparing cloudinit image')
>> >  try:
>> >  cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso')
>> > -self.phone_home_port = network.find_free_port()
>> > -if not self.phone_home_port:
>> > -self.cancel('Failed to get a free port')
>> > +if not self.phone_server:
>> > +self.cancel('Failed to get port used by the PhoneServer.')
>> 
>> Can you think of a condition where `self.phone_server` would not
>> evaluate to True?  `network.find_free_port()` could return None, so this
>> check was valid.  But now with `cloudinit.PhoneHomeServer`, I can not
>> see how we'd end up with a similar condition.  Instantiating
>> `cloudinit.PhoneHomeServer` where a port can not be allocated, AFAICT,
>> would raise a socket exception instead.
>
> Since this is a public method and could be called anytime before
> set_up_cloudinit(), I decided to keep the check just for safety reasons.
> Ideally, I would prefer not to have this dependency and add a new
> argument, but I didn't want to change the method signature since it
> would be required.
>

I'm not sure I follow your point.  Let me try to rephrase mine, in case
I failed to communicate it: I can't see how "if not self.phone_server"
is a valid check given that it will either:

* Contain an instance with a port that is already allocated, OR
* Not get assigned if cloudinit.PhoneHomeServer() fails (and raises an
  exception).

Instead of this check, it'd make sense to have a try/except block
protecting the PhoneHomeServer instantiation, and canceling the test if
it fails.

Or maybe you meant to check for self.phone_server.server_port instead?

Cheers,
- Cleber.




Re: [PATCH] tests/avocado: starts PhoneServer upfront

2022-03-11 Thread Cleber Rosa


Beraldo Leal  writes:

> Race conditions can happen with the current code, because the port that
> was available might not be anymore by the time the server is started.
>
> By setting the port to 0, PhoneServer it will use the OS default
> behavior to get a free port, then we save this information so we can
> later configure the guest.
>
> Suggested-by: Daniel P. Berrangé 
> Signed-off-by: Beraldo Leal 
> ---
>  tests/avocado/avocado_qemu/__init__.py | 13 -
>  1 file changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/tests/avocado/avocado_qemu/__init__.py 
> b/tests/avocado/avocado_qemu/__init__.py
> index 9b056b5ce5..e830d04b84 100644
> --- a/tests/avocado/avocado_qemu/__init__.py
> +++ b/tests/avocado/avocado_qemu/__init__.py
> @@ -602,9 +602,8 @@ def prepare_cloudinit(self, ssh_pubkey=None):
>  self.log.info('Preparing cloudinit image')
>  try:
>  cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso')
> -self.phone_home_port = network.find_free_port()
> -if not self.phone_home_port:
> -self.cancel('Failed to get a free port')
> +if not self.phone_server:
> +self.cancel('Failed to get port used by the PhoneServer.')

Can you think of a condition where `self.phone_server` would not
evaluate to True?  `network.find_free_port()` could return None, so this
check was valid.  But now with `cloudinit.PhoneHomeServer`, I can not
see how we'd end up with a similar condition.  Instantiating
`cloudinit.PhoneHomeServer` where a port can not be allocated, AFAICT,
would raise a socket exception instead.

Also, the name of the utility class is PhoneHomeServer.  Using a
different name in the message will make cross references into the
Avocado docs harder.

Finally, a nitpick: I'd drop the leading dot in such a test cancelation
message.

Other than those points, the direction of those changes are indeed a
great improvement.

Thanks,
- Cleber.




Re: [PATCH] tests/avocado: Cancel BootLinux tests in case there is no free port

2022-03-10 Thread Cleber Rosa


Thomas Huth  writes:

> The BootLinux tests are currently failing with an ugly python
> stack trace on my RHEL8 system since they cannot get a free port
> (likely due to the firewall settings on my system). Let's properly
> check the return value of find_free_port() instead and cancel the
> test gracefully if it cannot get a free port.
>
> Signed-off-by: Thomas Huth 
> ---
>  Unfortunately, it still takes > 70 seconds for each and every
>  tests from tests/avocado/boot_linux.py to get canceled, so
>  tests/avocado/boot_linux.py still renders "make check-avocado"
>  for me pretty unusable... looking at the implementation of
>  find_free_port() in Avocado, I wonder whether there isn't a
>  better way to get a free port number in Python? Brute-forcing
>  all ports between 1024 and 65536 seems just quite cumbersome
>  to me...
>
>  tests/avocado/avocado_qemu/__init__.py | 2 ++
>  1 file changed, 2 insertions(+)
>

LGTM, despite  the root issue is being addressed in Avocado.

Reviewed-by: Cleber Rosa 




Re: [PATCH 5/9] tests/avocado/linux_ssh_mips_malta.py: add missing accel (tcg) tag

2022-03-03 Thread Cleber Rosa


Philippe Mathieu-Daudé  writes:

> On 25/2/22 22:01, Cleber Rosa wrote:
>> Being explicit about the accelerator used on these tests is a good
>> thing in itself, but it will also be used in the filtering rules
>> applied on "make check-avocado".
>> Signed-off-by: Cleber Rosa 
>> ---
>>   tests/avocado/linux_ssh_mips_malta.py | 3 +++
>>   1 file changed, 3 insertions(+)
>> diff --git a/tests/avocado/linux_ssh_mips_malta.py
>> b/tests/avocado/linux_ssh_mips_malta.py
>> index c0f0be5ade..0179d8a6ca 100644
>> --- a/tests/avocado/linux_ssh_mips_malta.py
>> +++ b/tests/avocado/linux_ssh_mips_malta.py
>> @@ -23,6 +23,9 @@
>>   @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
>
> Should we remove this line then? ^^^
>

Yes, we definitely should.  But, I thought it should be a next step.
The reason being that manual invocations of avocado or a custom list of
tests to "make check-avocado" will still respect that at this point.

What do you think?

Cheers,
- Cleber.




[PATCH 8/9] Avocado tests: classify tests based on what it's booted

2022-02-25 Thread Cleber Rosa
This adds some classification to the existing tests, based on the boot
mechanism (and, a lot more loosely, on the content of the binary blob).

The proposal is to use the "boots" tag, and so far the following
values have been defined with the following meaning:

 - bios:   the "-bios" option is used to select the BIOS file to be
   loaded.  Because default bios are used in many QEMU runs,
   only tests that change the default are tagged.
 - kernel: means that the direct kernel boot mechanism (-kernel) is
   used.  Most of the time it means that a Linux kernel is
   booted, although there are occurrences of uboot usage.
 - initrd: means that an initial ram disk (-initrd) is used in
   addition to the kernel boot.
 - rootfs: means that a root filesystem is booted, in addition to a
   kernel and optionally an initrd.  This is usually done with
   a "-drive" command line option.
 - distro: means that a full blown distro image is booted, which may
   or may not include a kernel and initrd.  This is also
   usually done with a "-drive" command line option.

As with any other Avocado tags, it's possible to use them to select a
subset of tests.  For instance, if one wants to run tests that boots a
bios:

  $ avocado run -t boots:bios tests/avocado/

If one want to run tests that boots a kernel and an initrd:

  $ avocado run -t boots:kernel,boots:initrd tests/avocado/

It's possible, if deemed valuable, to further evolve this
classification into one with a clear separation between mechanism and
content.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/boot_linux.py   |  4 ++
 tests/avocado/boot_linux_console.py   | 54 +++
 tests/avocado/boot_xen.py |  3 ++
 tests/avocado/hotplug_cpu.py  |  1 +
 tests/avocado/intel_iommu.py  |  1 +
 tests/avocado/linux_initrd.py |  2 +
 tests/avocado/linux_ssh_mips_malta.py |  2 +
 tests/avocado/machine_arm_canona1100.py   |  1 +
 tests/avocado/machine_arm_integratorcp.py |  4 ++
 tests/avocado/machine_arm_n8x0.py |  2 +
 tests/avocado/machine_avr6.py |  1 +
 tests/avocado/machine_m68k_nextcube.py|  1 +
 tests/avocado/machine_microblaze.py   |  1 +
 tests/avocado/machine_mips_fuloong2e.py   |  1 +
 tests/avocado/machine_mips_loongson3v.py  |  1 +
 tests/avocado/machine_mips_malta.py   |  3 ++
 tests/avocado/machine_rx_gdbsim.py|  2 +
 tests/avocado/machine_s390_ccw_virtio.py  |  4 ++
 tests/avocado/machine_sparc64_sun4u.py|  1 +
 tests/avocado/machine_sparc_leon3.py  |  1 +
 tests/avocado/multiprocess.py |  4 ++
 tests/avocado/ppc_405.py  |  2 +
 tests/avocado/ppc_bamboo.py   |  2 +
 tests/avocado/ppc_mpc8544ds.py|  1 +
 tests/avocado/ppc_prep_40p.py |  1 +
 tests/avocado/ppc_pseries.py  |  1 +
 tests/avocado/ppc_virtex_ml507.py |  1 +
 tests/avocado/replay_kernel.py| 28 
 tests/avocado/replay_linux.py |  1 +
 tests/avocado/reverse_debugging.py|  1 +
 tests/avocado/smmu.py |  1 +
 tests/avocado/tcg_plugins.py  |  3 ++
 tests/avocado/virtio-gpu.py   |  2 +
 tests/avocado/virtiofs_submounts.py   |  1 +
 34 files changed, 139 insertions(+)

diff --git a/tests/avocado/boot_linux.py b/tests/avocado/boot_linux.py
index ab19146d1e..c4172f11e3 100644
--- a/tests/avocado/boot_linux.py
+++ b/tests/avocado/boot_linux.py
@@ -18,6 +18,7 @@
 class BootLinuxX8664(LinuxTest):
 """
 :avocado: tags=arch:x86_64
+:avocado: tags=boots:distro
 """
 
 def test_pc_i440fx_tcg(self):
@@ -62,6 +63,7 @@ class BootLinuxAarch64(LinuxTest):
 :avocado: tags=arch:aarch64
 :avocado: tags=machine:virt
 :avocado: tags=machine:gic-version=2
+:avocado: tags=boots:distro
 """
 
 def add_common_args(self):
@@ -110,6 +112,7 @@ def test_virt_kvm(self):
 class BootLinuxPPC64(LinuxTest):
 """
 :avocado: tags=arch:ppc64
+:avocado: tags=boots:distro
 """
 
 def test_pseries_tcg(self):
@@ -125,6 +128,7 @@ def test_pseries_tcg(self):
 class BootLinuxS390X(LinuxTest):
 """
 :avocado: tags=arch:s390x
+:avocado: tags=boots:distro
 """
 
 @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
diff --git a/tests/avocado/boot_linux_console.py 
b/tests/avocado/boot_linux_console.py
index 9c618d4809..0a8980953f 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -95,6 +95,7 @@ def test_x86_64_pc(self):
 """
 :avocado: tags=arch:x86_64
 :avocado: tags=machine:pc
+:avocado: tags=boots:kernel
 """
 kernel_url = ('http

[PATCH 9/9] Avocado tests: don't run tests with TCG that boot full blown distros

2022-02-25 Thread Cleber Rosa
Tests that use TCG and boot full blown distros, such as Fedora, will
take a good time to run.  This excludes those combinations by default
on invocations of "make check-avocado".

Tests that rely on KVM instead, will continue to run.

As a reminder, one can always supply a list of tests or tags to be
used on a "make check-avocado" by setting AVOCADO_TESTS or
AVOCADO_TAGS.

Signed-off-by: Cleber Rosa 
---
 tests/Makefile.include | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 676aa0d944..6d9cf7cbc9 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -91,8 +91,11 @@ endif
 # Any number of command separated loggers are accepted.  For more
 # information please refer to "avocado --help".
 AVOCADO_SHOW=app
+comma:=,
 ifndef AVOCADO_TAGS
-   AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t arch:%, \
+   AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t 
arch:%$(comma)accel:tcg$(comma)boots:-distro, \
+$(filter %-softmmu,$(TARGETS)))
+   AVOCADO_CMDLINE_TAGS+=$(patsubst %-softmmu,-t arch:%$(comma)accel:kvm, \
 $(filter %-softmmu,$(TARGETS)))
 else
AVOCADO_CMDLINE_TAGS=$(addprefix -t , $(AVOCADO_TAGS))
-- 
2.35.1




[PATCH 4/9] Avocado: bump to version 95.0

2022-02-25 Thread Cleber Rosa
Even though there have been a number of improvements (and some pretty
deep internal changes) since Avocado 88.1, only one change should
affect "make check-avocado".

With the nrunner architecture, test execution happens in parallel by
default.  But, tests may fail due to insufficient timeouts or similar
reasons when run under systems with limited or shared resources.  To
avoid breakages, especially on CI, let's keep the serial execution
until proven that it won't impact the CI jobs.

Signed-off-by: Cleber Rosa 
---
 tests/Makefile.include | 1 +
 tests/requirements.txt | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index e7153c8e91..676aa0d944 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -135,6 +135,7 @@ check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images
 $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \
--filter-by-tags-include-empty-key) \
 $(AVOCADO_CMDLINE_TAGS) \
+--nrunner-max-parallel-tasks=1 \
 $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \
 "AVOCADO", "tests/avocado")
 
diff --git a/tests/requirements.txt b/tests/requirements.txt
index a21b59b443..49aa0fd6f6 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,5 +1,5 @@
 # Add Python module requirements, one per line, to be installed
 # in the tests/venv Python virtual environment. For more info,
 # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
-avocado-framework==88.1
+avocado-framework==95.0
 pycdlib==1.11.0
-- 
2.35.1




[PATCH 0/9] Avocado tests: filter out tests using TCG booting full blown distros

2022-02-25 Thread Cleber Rosa
It was previously reported[1] and discussed that tests booting full
blown distros and relying on TCG would take too much time to run,
especially in the environments given by GitLab CI's shared runners.

This is an implementation of a proposal to exclude those tests from
being run by default on `make check-avocado` invocations.  To make it
extra clear, all tests are still available, but those that are tagged
with "accel:tcg" and "boots:distro", are filtered out by default on
`make check-avocado`.

This is the situation of the Avocado GitLab CI jobs with and without
the changes in this PS:

 +--+--- +
 |Now[2]|   Before[3]|
++--++
|  Job   |  Length |  Tests |  Length  |  Tests  |
| Name   | (mm:ss) |   Run  |  (mm:ss) |   Run   |
++--+---+
|avocado-system-alpine   |  06:33  16   |   20:30   18   |
|avocado-system-debian   |  12:06  24   |   13:05   24   |
|avocado-system-centos   |  09:58  41   |   24:15   44   |
|avocado-system-fedora   |  08:50  35   |   08:59   35   |
|avocado-system-opensuse |  08:09  38   |   27:21   42   |
|avocado-system-ubuntu   |  06:52  16   |   18:52   18   |
|avocado-cfi-x86_64  |  05:43  27   |   15:07   29   |
++--++
|TOTALS  |  58:11 197   | 2:08:09  210   |
++--++

Assuming the jobs run in parallel, the overall wait time for all the
Avocado jobs to complete is now ~12 minutes.

[1] https://lists.gnu.org/archive/html/qemu-devel/2021-07/msg07271.html
[2] https://gitlab.com/cleber.gnu/qemu/-/pipelines/479720240
[3] https://gitlab.com/qemu-project/qemu/-/pipelines/478580581

Cleber Rosa (9):
  Avocado GitLab CI jobs: don't reset TARGETS and simplify commands
  Avocado tests: use logging namespace that is preserved in test logs
  Avocado migration test: adapt to "utils.network" API namespace change
  Avocado: bump to version 95.0
  tests/avocado/linux_ssh_mips_malta.py: add missing accel (tcg) tag
  tests/avocado/virtiofs_submounts.py: shared_dir may not exist
  Avocado tests: improve documentation on tag filtering
  Avocado tests: classify tests based on what it's booted
  Avocado tests: don't run tests with TCG that boot full blown distros

 .gitlab-ci.d/buildtest-template.yml   |  3 ++
 .gitlab-ci.d/buildtest.yml|  9 
 docs/devel/testing.rst| 22 +
 tests/Makefile.include|  6 ++-
 tests/avocado/avocado_qemu/__init__.py| 10 ++---
 tests/avocado/boot_linux.py   |  4 ++
 tests/avocado/boot_linux_console.py   | 54 +++
 tests/avocado/boot_xen.py |  3 ++
 tests/avocado/hotplug_cpu.py  |  1 +
 tests/avocado/intel_iommu.py  |  1 +
 tests/avocado/linux_initrd.py |  5 ++-
 tests/avocado/linux_ssh_mips_malta.py |  5 +++
 tests/avocado/machine_arm_canona1100.py   |  1 +
 tests/avocado/machine_arm_integratorcp.py |  7 ++-
 tests/avocado/machine_arm_n8x0.py |  2 +
 tests/avocado/machine_avr6.py |  1 +
 tests/avocado/machine_m68k_nextcube.py|  1 +
 tests/avocado/machine_microblaze.py   |  1 +
 tests/avocado/machine_mips_fuloong2e.py   |  1 +
 tests/avocado/machine_mips_loongson3v.py  |  1 +
 tests/avocado/machine_mips_malta.py   |  6 ++-
 tests/avocado/machine_rx_gdbsim.py|  2 +
 tests/avocado/machine_s390_ccw_virtio.py  |  4 ++
 tests/avocado/machine_sparc64_sun4u.py|  1 +
 tests/avocado/machine_sparc_leon3.py  |  1 +
 tests/avocado/migration.py|  4 +-
 tests/avocado/multiprocess.py |  4 ++
 tests/avocado/ppc_405.py  |  2 +
 tests/avocado/ppc_bamboo.py   |  2 +
 tests/avocado/ppc_mpc8544ds.py|  1 +
 tests/avocado/ppc_prep_40p.py |  1 +
 tests/avocado/ppc_pseries.py  |  1 +
 tests/avocado/ppc_virtex_ml507.py |  1 +
 tests/avocado/replay_kernel.py| 33 --
 tests/avocado/replay_linux.py |  6 +--
 tests/avocado/reverse_debugging.py|  6 +--
 tests/avocado/smmu.py |  1 +
 tests/avocado/tcg_plugins.py  |  3 ++
 tests/avocado/tesseract_utils.py  |  6 +--
 tests/avocado/virtio-gpu.py   |  2 +
 tests/avocado/virtio_check_params.py  |  3 +-
 tests/avocado/virtiofs_submounts.py   |  8 ++--
 tests/requirements.txt|  2 +-
 43 files changed, 197 insertions(+), 41 deletions(-)

-- 
2.35.1





[PATCH 6/9] tests/avocado/virtiofs_submounts.py: shared_dir may not exist

2022-02-25 Thread Cleber Rosa
If the test is skipped because of its conditionals, the shared_dir
attribute may not exist.

Check for its existence in the tearDown() method to avoid an
AttributeError.

Signed-off-by: Cleber Rosa 
---
 tests/avocado/virtiofs_submounts.py | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/avocado/virtiofs_submounts.py 
b/tests/avocado/virtiofs_submounts.py
index e6dc32ffd4..d9c2c9d9ef 100644
--- a/tests/avocado/virtiofs_submounts.py
+++ b/tests/avocado/virtiofs_submounts.py
@@ -157,9 +157,10 @@ def tearDown(self):
 except:
 pass
 
-scratch_dir = os.path.join(self.shared_dir, 'scratch')
-self.run(('bash', self.get_data('cleanup.sh'), scratch_dir),
- ignore_error=True)
+if hasattr(self, 'shared_dir'):
+scratch_dir = os.path.join(self.shared_dir, 'scratch')
+self.run(('bash', self.get_data('cleanup.sh'), scratch_dir),
+ ignore_error=True)
 
 def test_pre_virtiofsd_set_up(self):
 self.set_up_shared_dir()
-- 
2.35.1




[PATCH 3/9] Avocado migration test: adapt to "utils.network" API namespace change

2022-02-25 Thread Cleber Rosa
Since Avocado 94.0[1], the "avocado.utils.network" dropped a lot of
previously deprecated API names, having the new names into a finer
grained structure.

This simply uses the new API names for the network port utility
module.

[1] - 
https://avocado-framework.readthedocs.io/en/latest/releases/94_0.html#utility-apis

Signed-off-by: Cleber Rosa 
---
 tests/avocado/avocado_qemu/__init__.py | 5 +++--
 tests/avocado/migration.py | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/avocado/avocado_qemu/__init__.py 
b/tests/avocado/avocado_qemu/__init__.py
index 88cec83e5c..3f15c8a222 100644
--- a/tests/avocado/avocado_qemu/__init__.py
+++ b/tests/avocado/avocado_qemu/__init__.py
@@ -17,7 +17,8 @@
 import uuid
 
 import avocado
-from avocado.utils import cloudinit, datadrainer, network, process, ssh, 
vmimage
+from avocado.utils import cloudinit, datadrainer, process, ssh, vmimage
+from avocado.utils.network import ports
 from avocado.utils.path import find_command
 
 #: The QEMU build root directory.  It may also be the source directory
@@ -601,7 +602,7 @@ def prepare_cloudinit(self, ssh_pubkey=None):
 self.log.info('Preparing cloudinit image')
 try:
 cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso')
-self.phone_home_port = network.find_free_port()
+self.phone_home_port = ports.find_free_port()
 pubkey_content = None
 if ssh_pubkey:
 with open(ssh_pubkey) as pubkey:
diff --git a/tests/avocado/migration.py b/tests/avocado/migration.py
index 584d6ef53f..4b25680c50 100644
--- a/tests/avocado/migration.py
+++ b/tests/avocado/migration.py
@@ -14,7 +14,7 @@
 from avocado_qemu import QemuSystemTest
 from avocado import skipUnless
 
-from avocado.utils import network
+from avocado.utils.network import ports
 from avocado.utils import wait
 from avocado.utils.path import find_command
 
@@ -57,7 +57,7 @@ def do_migrate(self, dest_uri, src_uri=None):
 self.assert_migration(source_vm, dest_vm)
 
 def _get_free_port(self):
-port = network.find_free_port()
+port = ports.find_free_port()
 if port is None:
 self.cancel('Failed to find a free port')
 return port
-- 
2.35.1




[PATCH 7/9] Avocado tests: improve documentation on tag filtering

2022-02-25 Thread Cleber Rosa
It's possible to filter based on a combination of criteria.  This adds
examples to the documentation.

Signed-off-by: Cleber Rosa 
---
 docs/devel/testing.rst | 22 ++
 1 file changed, 22 insertions(+)

diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst
index 92d40cdd19..f5b6e07b5c 100644
--- a/docs/devel/testing.rst
+++ b/docs/devel/testing.rst
@@ -936,6 +936,28 @@ in the current directory, tagged as "quick", run:
 
   avocado run -t quick .
 
+To run tests with a given value for a given tag, such as having the
+``accel`` tag set to ``kvm``, run:
+
+.. code::
+
+  avocado run -t accel:kvm .
+
+Multiple mandatory conditions can also be given.  To run only tests
+with ``arch`` set to ``x86_64`` and ``accel`` set to ``kvm``, run:
+
+.. code::
+
+  avocado run -t arch:x86_64,accel:kvm .
+
+It's also possible to exclude tests that contain a given value for a
+tag.  To list all tests that do *not* have ``arch`` set to ``x86_64``,
+run:
+
+.. code::
+
+  avocado run -t arch:-x86_64 .
+
 The ``avocado_qemu.Test`` base test class
 ^
 
-- 
2.35.1




[PATCH 5/9] tests/avocado/linux_ssh_mips_malta.py: add missing accel (tcg) tag

2022-02-25 Thread Cleber Rosa
Being explicit about the accelerator used on these tests is a good
thing in itself, but it will also be used in the filtering rules
applied on "make check-avocado".

Signed-off-by: Cleber Rosa 
---
 tests/avocado/linux_ssh_mips_malta.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/avocado/linux_ssh_mips_malta.py 
b/tests/avocado/linux_ssh_mips_malta.py
index c0f0be5ade..0179d8a6ca 100644
--- a/tests/avocado/linux_ssh_mips_malta.py
+++ b/tests/avocado/linux_ssh_mips_malta.py
@@ -23,6 +23,9 @@
 @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
 @skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available')
 class LinuxSSH(QemuSystemTest, LinuxSSHMixIn):
+"""
+:avocado: tags=accel:tcg
+"""
 
 timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg'
 
-- 
2.35.1




[PATCH 2/9] Avocado tests: use logging namespace that is preserved in test logs

2022-02-25 Thread Cleber Rosa
Since Avocado 92.0[1], there's no universal preservation of logged
content via Python's "logging" APIs into the test log files.  This
changes were motivated by the fact that doing so is intrusive as it
touches on Python's root logger.

Test writers are now expected to use "avocado." as a namespace prefix
for everything that Avocado should collect/preserve, and other
prefixes for logged content that should be handled differently.

[1] - 
https://avocado-framework.readthedocs.io/en/94.0/releases/92_0.html#users-test-writers

Signed-off-by: Cleber Rosa 
---
 tests/avocado/avocado_qemu/__init__.py| 5 ++---
 tests/avocado/linux_initrd.py | 3 +--
 tests/avocado/machine_arm_integratorcp.py | 3 +--
 tests/avocado/machine_mips_malta.py   | 3 +--
 tests/avocado/replay_kernel.py| 5 ++---
 tests/avocado/replay_linux.py | 5 ++---
 tests/avocado/reverse_debugging.py| 5 ++---
 tests/avocado/tesseract_utils.py  | 6 +++---
 tests/avocado/virtio_check_params.py  | 3 +--
 9 files changed, 15 insertions(+), 23 deletions(-)

diff --git a/tests/avocado/avocado_qemu/__init__.py 
b/tests/avocado/avocado_qemu/__init__.py
index 75063c0c30..88cec83e5c 100644
--- a/tests/avocado/avocado_qemu/__init__.py
+++ b/tests/avocado/avocado_qemu/__init__.py
@@ -8,7 +8,6 @@
 # This work is licensed under the terms of the GNU GPL, version 2 or
 # later.  See the COPYING file in the top-level directory.
 
-import logging
 import os
 import shutil
 import subprocess
@@ -138,7 +137,7 @@ def _console_interaction(test, success_message, 
failure_message,
 if vm is None:
 vm = test.vm
 console = vm.console_socket.makefile(mode='rb', encoding='utf-8')
-console_logger = logging.getLogger('console')
+console_logger = test.log.getChild('console')
 while True:
 if send_string:
 vm.console_socket.sendall(send_string.encode())
@@ -370,7 +369,7 @@ class LinuxSSHMixIn:
 """Contains utility methods for interacting with a guest via SSH."""
 
 def ssh_connect(self, username, credential, credential_is_key=True):
-self.ssh_logger = logging.getLogger('ssh')
+self.ssh_logger = self.log.getChild('ssh')
 res = self.vm.command('human-monitor-command',
   command_line='info usernet')
 port = get_info_usernet_hostfwd_port(res)
diff --git a/tests/avocado/linux_initrd.py b/tests/avocado/linux_initrd.py
index ba02e5a563..6ebf299cd4 100644
--- a/tests/avocado/linux_initrd.py
+++ b/tests/avocado/linux_initrd.py
@@ -9,7 +9,6 @@
 # later.  See the COPYING file in the top-level directory.
 
 import os
-import logging
 import tempfile
 
 from avocado_qemu import QemuSystemTest
@@ -79,7 +78,7 @@ def test_with_2gib_file_should_work_with_linux_v4_16(self):
  '-m', '5120')
 self.vm.launch()
 console = self.vm.console_socket.makefile()
-console_logger = logging.getLogger('console')
+console_logger = self.log.getChild('console')
 while True:
 msg = console.readline()
 console_logger.debug(msg.strip())
diff --git a/tests/avocado/machine_arm_integratorcp.py 
b/tests/avocado/machine_arm_integratorcp.py
index 1ffe1073ef..697ee76f6c 100644
--- a/tests/avocado/machine_arm_integratorcp.py
+++ b/tests/avocado/machine_arm_integratorcp.py
@@ -9,7 +9,6 @@
 # later.  See the COPYING file in the top-level directory.
 
 import os
-import logging
 
 from avocado import skipUnless
 from avocado_qemu import QemuSystemTest
@@ -84,7 +83,7 @@ def test_framebuffer_tux_logo(self):
 self.vm.command('human-monitor-command', command_line='stop')
 self.vm.command('human-monitor-command',
 command_line='screendump %s' % screendump_path)
-logger = logging.getLogger('framebuffer')
+logger = self.log.getChild('framebuffer')
 
 cpu_count = 1
 match_threshold = 0.92
diff --git a/tests/avocado/machine_mips_malta.py 
b/tests/avocado/machine_mips_malta.py
index f1895d59f3..5f98ba1620 100644
--- a/tests/avocado/machine_mips_malta.py
+++ b/tests/avocado/machine_mips_malta.py
@@ -9,7 +9,6 @@
 
 import os
 import gzip
-import logging
 
 from avocado import skipUnless
 from avocado_qemu import QemuSystemTest
@@ -72,7 +71,7 @@ def do_test_i6400_framebuffer_logo(self, cpu_cores_count):
 self.vm.command('human-monitor-command', command_line='stop')
 self.vm.command('human-monitor-command',
 command_line='screendump %s' % screendump_path)
-logger = logging.getLogger('framebuffer')
+logger = self.log.getChild('framebuffer')
 
 match_threshold = 0.95
 screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR)
diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py
index c68a953730..40f52b3913 100644
--- a/tests/avocado/rep

[PATCH 1/9] Avocado GitLab CI jobs: don't reset TARGETS and simplify commands

2022-02-25 Thread Cleber Rosa
The Avocado tests rely on the TARGETS variable, which is computed
based on the built targets.  The current set of commands on the
inherited scripts section will reset those, leaving TARGETS empty and
consequently the AVOCADO_CMDLINE_TAGS empty too.

This is causing the list of tests to have no filtering by tags, which
can be seen by the large number of CANCEL/SKIP statuses (because of
the lack of a matching qemu-system-$(ARCH) binary).

With this change, the TARGETS variable is properly computed, and so is
the AVOCADO_CMDLINE_TAGS.  This causes a reduction in the number of
tests attempted to be run on each job, and less noise on the test
results.

Signed-off-by: Cleber Rosa 
---
 .gitlab-ci.d/buildtest-template.yml | 3 +++
 .gitlab-ci.d/buildtest.yml  | 9 -
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/.gitlab-ci.d/buildtest-template.yml 
b/.gitlab-ci.d/buildtest-template.yml
index 2c7980a4f6..c038a0910f 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -64,6 +64,9 @@
 du -chs ${CI_PROJECT_DIR}/avocado-cache ;
   fi
 - export AVOCADO_ALLOW_UNTRUSTED_CODE=1
+  script:
+- cd build
+- make check-avocado
   after_script:
 - cd build
 - du -chs ${CI_PROJECT_DIR}/avocado-cache
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index 0aa70213fb..d0bed9c382 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -33,7 +33,6 @@ avocado-system-alpine:
   artifacts: true
   variables:
 IMAGE: alpine
-MAKE_CHECK_ARGS: check-avocado
 
 build-system-ubuntu:
   extends: .native_build_job_template
@@ -66,7 +65,6 @@ avocado-system-ubuntu:
   artifacts: true
   variables:
 IMAGE: ubuntu2004
-MAKE_CHECK_ARGS: check-avocado
 
 build-system-debian:
   extends: .native_build_job_template
@@ -98,7 +96,6 @@ avocado-system-debian:
   artifacts: true
   variables:
 IMAGE: debian-amd64
-MAKE_CHECK_ARGS: check-avocado
 
 crash-test-debian:
   extends: .native_test_job_template
@@ -143,7 +140,6 @@ avocado-system-fedora:
   artifacts: true
   variables:
 IMAGE: fedora
-MAKE_CHECK_ARGS: check-avocado
 
 crash-test-fedora:
   extends: .native_test_job_template
@@ -189,7 +185,6 @@ avocado-system-centos:
   artifacts: true
   variables:
 IMAGE: centos8
-MAKE_CHECK_ARGS: check-avocado
 
 build-system-opensuse:
   extends: .native_build_job_template
@@ -221,7 +216,6 @@ avocado-system-opensuse:
   artifacts: true
   variables:
 IMAGE: opensuse-leap
-MAKE_CHECK_ARGS: check-avocado
 
 
 # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
@@ -382,7 +376,6 @@ avocado-cfi-aarch64:
   artifacts: true
   variables:
 IMAGE: fedora
-MAKE_CHECK_ARGS: check-avocado
 
 build-cfi-ppc64-s390x:
   extends: .native_build_job_template
@@ -424,7 +417,6 @@ avocado-cfi-ppc64-s390x:
   artifacts: true
   variables:
 IMAGE: fedora
-MAKE_CHECK_ARGS: check-avocado
 
 build-cfi-x86_64:
   extends: .native_build_job_template
@@ -460,7 +452,6 @@ avocado-cfi-x86_64:
   artifacts: true
   variables:
 IMAGE: fedora
-MAKE_CHECK_ARGS: check-avocado
 
 tsan-build:
   extends: .native_build_job_template
-- 
2.35.1




Re: "make check-acceptance" takes way too long

2022-02-01 Thread Cleber Rosa
On Tue, Feb 1, 2022 at 1:06 PM Alex Bennée  wrote:
>
>
> Cleber Rosa  writes:
>
> > On Tue, Feb 1, 2022 at 11:20 AM Daniel P. Berrangé  
> > wrote:
> >>
> >> On Tue, Feb 01, 2022 at 11:01:43AM -0500, Cleber Rosa wrote:
> >> > On Tue, Feb 1, 2022 at 6:25 AM Alex Bennée  
> >> > wrote:
> >> > >
> >> > > We have up to now tried really hard as a project to avoid building and
> >> > > hosting our own binaries to avoid theoretical* GPL compliance issues.
> >> > > This is why we've ended up relying so much on distros to build and host
> >> > > binaries we can use. Most QEMU developers have their own personal zoo 
> >> > > of
> >> > > kernels and userspaces which they use for testing. I use custom kernels
> >> > > with a buildroot user space in initramfs for example. We even use the
> >> > > qemu advent calendar for a number of our avocado tests but we basically
> >> > > push responsibility for GPL compliance to the individual developers in
> >> > > that case.
> >> > >
> >> > > *theoretical in so far I suspect most people would be happy with a
> >> > > reference to an upstream repo/commit and .config even if that is not to
> >> > > the letter of the "offer of source code" required for true compliance.
> >> > >
> >> >
> >> > Yes, it'd be fine (great, really!) if a lightweight distro (or
> >> > kernels/initrd) were to
> >> > be maintained and identified as an "official" QEMU pick.  Putting the 
> >> > binaries
> >> > in the source tree though, brings all sorts of compliance issues.
> >>
> >> All that's really needed is to have the source + build recipes
> >> in a separate git repo. A pipeline can build them periodically
> >> and publish artifacts, which QEMU can then consume in its pipeline.
> >>
> >
> > I get your point, but then to acquire the artifacts one needs to:
> >
> > 1. depend on the CI system to deploy the artifacts in subsequent job
> > stages (a limitation IMO), OR
> > 2. if outside the CI, implement a download/cache mechanism for those
> > artifacts, which gets us back to the previous point, only with a
> > different distro/kernel+initrd.
> >
> > With that, the value proposal has to be in the characteristics of
> > distro/kernel+initrd itself. It has to have enough differentiation to
> > justify the development/maintenance work, as opposed to using existing
> > ones.
> >
> > FWIW, my non-scientific tests booting on my 3+ YO machine:
> >
> > * CirrOS x86_64+KVM: ~2 seconds
> > * CirrOS aarch64+TCG: ~20 seconds
> > * Fedora kernel+initrd aarch64+TCG
> > (tests/avocado/boot_linux_console.py:BootLinuxConsole.test_aarch64_virt):
> > ~1 second
> >
> > I would imagine that CirrOS aarch64+KVM on an adequate system would be
> > similar to the CirrOS x86_64+KVM.  We can develop/maintain a slimmer
> > distro, and/or set the default test workloads where they perform the
> > best.  The development cost of the latter is quite small.  I've added
> > a missing bit to the filtering capabilities in Avocado[1] and will
> > send a proposal to QEMU along these lines.
>
> FWIW the bit I'm interested in for the slow test in question here is
> that it does a full boot through the EDK2 bios (EL3->EL2->EL1). I'm not
> overly concerned about what gets run in userspace as long as something
> is run that shows EL0 can be executed and handle task switching. I
> suspect most of the userspace startup of a full distro basically just
> ends up testing the same code paths over and over again.
>

That's an interesting point.

Does that mean that ,if you are able to determine a condition that the
boot has progressed far enough, you would consider the test a success?
 I mean, that's what the "boot_linux_console.py" tests do: they find a
known pattern in the console, and do not care about what happens next.

The same could be done with the "full blown distro boot" tests
(boot_linux.py). They could be configured to treat any chosen condition
as a "successful boot", not just a "login prompt" or a "fully
initialized and cloud-init configured system".  We can reuse most of
the same code, and add configurable conditions for different test cases.

Does that make sense?

- Cleber.




Re: "make check-acceptance" takes way too long

2022-02-01 Thread Cleber Rosa
On Tue, Feb 1, 2022 at 12:01 PM Daniel P. Berrangé  wrote:
>
> On Tue, Feb 01, 2022 at 12:29:56AM -0500, Cleber Rosa wrote:
> >
> > Assuming this is about "Testing that QEMU can boot a full distro", I 
> > wouldn't
> > try to solve the problem by making the distro too slim to get to the
> > point of becoming
> > an unrealistic system.
>
> At a high level our with acceptance (integration) testing is of
> course to make sure that QEMU is correctly emulating a full virtual
> machine, such that we have confidence that it can run real world
> operating systems.
>
> There are a number of approaches to achieve that with varying
> tradeoffs.
>
>   - Testing with very specific tailored environments, running
> very specific userspace tools and minimal kernel setup.
>
> This can give us a pretty decent amount of coverage of
> the core features of the emulated environment in a tightly
> controlled amount of wallclock time. When it fails it ought
> to be relatively easy to understand and debug.
>
> The downside is that it is the QEMU code paths it hits are
> going to be fairly static.
>
>
>   - Testing with arbitrary execution of real world OS images.
>
> I think of this as a bit of scattergun approach. We're not
> trying to tightly control what runs, we actually want it
> to run alot of arbitrarily complex and unusual stuff.
>
> This is going to be time consuming and is likely to have
> higher false positive failure rates. It is worthwhile
> because it is going to find the edge cases that you simply
> won't detect any other way, because you can't even imagine
> the problems that you're trying to uncover until you uncover
> them by accident with a real OS workload.
>
> It is kinda like fuzzing QEMU with an entire OS :-)
>
>
> Both of these approaches are valid/complementary and we should
> want to have both.
>

Agreed.

> Any test suite is only going to find bugs though if it is
> actually executed.
>
> As a contributor though the former is stuff I'm likely to be
> willing to run myself before sending patches, while the latter
> is stuff I'm just always going to punt to merge testing infra.
>
> We want to be wary of leaving too much to be caught at time
> of merge tests, because that puts a significant burden on the
> person responsible for merging code in QEMU.  We need our
> contributors to be motivated to run as much testing as possible
> ahead of submitting patches.
>
> > IMO the deal breaker with regards to test time can be solved more cheaply by
> > having and using KVM where these tests will run, and not running them by
> > default otherwise.  With the tagging mechanism we should be able to set a
> > condition such as: "If using TCG, exclude tests that boot a full blown 
> > distro.
> > If using KVM, do not criticize what gets booted".  Resulting in something
> > like:
>
> > Does that sound like something appropriate?
>
> Depends whether you only care about KVM or not. From a POV of QEMU
> community CI, I think it is valid to want to test TCG functionality
>
>

Maybe I wasn't clear enough.  I am suggesting that tests using TCG do
not run by default (on a "make check-avocado") if, and only if, they
are booting a complete OS.  That would  bring the time to run "make
check-avocado" to a fifth of its current time.

And to be clear, there are a *lot* of tests running TCG, but they
happen to boot kernel+initrd by default, so we're not necessarily
abandoning TCG at all.

Also, we can have another target, or option as suggested by others in
this thread, where those lengthy TCG based full distro boot tests get
to run.

> > BTW, on the topic of "Using something as a base OS for scripts (tests) to 
> > run
> > on it", another possibility for using full blown OS would be to save
> > their initialized
> > state, and load it to memory for each test, saving the guest boot time.  
> > This
> > should of course be done at the framework level and transparent to tests.
>
> There is *massive* virtue in simplicity & predictability for testing.
>
> Building more complex infrastructure to pre-initialize caches with
> clever techniques like saving running OS state is clever, but is
> certainly not simple or predictable. When that kind of stuff goes
> wrong, whoever gets to debug it is going to have a really bad day.
>
> This can be worth doing if there's no other viable approach to achieve
> the desired end goal. I don't think that's the case for our integration
> testing needs in QEMU though. There's masses of scope for us to explore
> testing with minimal tailored guest images/environments, before we need
> to resort to building more complex optimization strategies.
>

I'm aware and second that. Avocado-VT tests transitioned from a model
where VMs would, by default, be reused across tests, to a "start every
VM from scratch".  But, users can still opt-in to the "reuse VM model"
if they feel the tradeoff is valid.

Best regards!
- Cleber




Re: "make check-acceptance" takes way too long

2022-02-01 Thread Cleber Rosa
On Tue, Feb 1, 2022 at 11:20 AM Daniel P. Berrangé  wrote:
>
> On Tue, Feb 01, 2022 at 11:01:43AM -0500, Cleber Rosa wrote:
> > On Tue, Feb 1, 2022 at 6:25 AM Alex Bennée  wrote:
> > >
> > > We have up to now tried really hard as a project to avoid building and
> > > hosting our own binaries to avoid theoretical* GPL compliance issues.
> > > This is why we've ended up relying so much on distros to build and host
> > > binaries we can use. Most QEMU developers have their own personal zoo of
> > > kernels and userspaces which they use for testing. I use custom kernels
> > > with a buildroot user space in initramfs for example. We even use the
> > > qemu advent calendar for a number of our avocado tests but we basically
> > > push responsibility for GPL compliance to the individual developers in
> > > that case.
> > >
> > > *theoretical in so far I suspect most people would be happy with a
> > > reference to an upstream repo/commit and .config even if that is not to
> > > the letter of the "offer of source code" required for true compliance.
> > >
> >
> > Yes, it'd be fine (great, really!) if a lightweight distro (or
> > kernels/initrd) were to
> > be maintained and identified as an "official" QEMU pick.  Putting the 
> > binaries
> > in the source tree though, brings all sorts of compliance issues.
>
> All that's really needed is to have the source + build recipes
> in a separate git repo. A pipeline can build them periodically
> and publish artifacts, which QEMU can then consume in its pipeline.
>

I get your point, but then to acquire the artifacts one needs to:

1. depend on the CI system to deploy the artifacts in subsequent job
stages (a limitation IMO), OR
2. if outside the CI, implement a download/cache mechanism for those
artifacts, which gets us back to the previous point, only with a
different distro/kernel+initrd.

With that, the value proposal has to be in the characteristics of
distro/kernel+initrd itself. It has to have enough differentiation to
justify the development/maintenance work, as opposed to using existing
ones.

FWIW, my non-scientific tests booting on my 3+ YO machine:

* CirrOS x86_64+KVM: ~2 seconds
* CirrOS aarch64+TCG: ~20 seconds
* Fedora kernel+initrd aarch64+TCG
(tests/avocado/boot_linux_console.py:BootLinuxConsole.test_aarch64_virt):
~1 second

I would imagine that CirrOS aarch64+KVM on an adequate system would be
similar to the CirrOS x86_64+KVM.  We can develop/maintain a slimmer
distro, and/or set the default test workloads where they perform the
best.  The development cost of the latter is quite small.  I've added
a missing bit to the filtering capabilities in Avocado[1] and will
send a proposal to QEMU along these lines.

Regards,
- Cleber.

[1] https://github.com/avocado-framework/avocado/pull/5245




Re: "make check-acceptance" takes way too long

2022-02-01 Thread Cleber Rosa
On Tue, Feb 1, 2022 at 6:07 AM Kashyap Chamarthy  wrote:
>
> On Tue, Jan 25, 2022 at 10:20:11AM +0100, Gerd Hoffmann wrote:
> >   Hi,
> >
> > > IMHO the ideal scenario would be for us to have a kernel, initrd
> > > containing just busybox tools for the key arch targets we care
> > > about. Those could be used with direct kernel boot or stuffed
> > > into a disk iamge. Either way, they would boot in ~1 second,
> > > even with TCG, and would be able to execute simple shell scripts
> > > to test a decent amount of QEMU functionality.
> >
> > I have some test images based on buildroot which are essentially that.
> > https://gitlab.com/kraxel/br-kraxel/
> >
> > Still a significant download, but much smaller than a full fedora or
> > ubuntu cloud image and it boots much faster too.  Not down to only one
> > second though.
>
> Any objection to using CirrOS[1] images for boot-testing?   FWIW,
> OpenStack upstream CI boots thousands of guests each day with these for
> many years now.  It boots quick, and also satisfies one of Peter's
> other requirements: AArch64 images.
>

Even though I strongly support CirrOS (see my reply to Dan), I strongly object
using it as the only OS on "boot tests" (that is, testing that QEMU can fully
boot a system).  The reason is because actual functional coverage is reduced
and detached from most real world scenarios (I'm not aware of CirrOS, Alpine
and similar distros being used significantly on real world workloads).

This is the reasoning behind tests such as
"tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm" which takes ~12
seconds to run on my 4 years old laptop.

Depending on what one considers a system to be booted, the existing approach
on "tests/avocado/boot_linux_console.py:BootLinuxConsole.test_x86_64_pc" of
booting only a kernel / initrd is also valid.  That takes around 0.4
seconds with KVM
and ~2 seconds to run with TCG on my system.

> A downside of CirrOS is it doesn't have a package manager, so installing
> custom packages is a PITA.  The main use-case of CirrOS images
> is any kind of boot-testing only.
>
> To make the booting even quicker with CirrOS, do disable the "metadata
> service lookup" (this is queried 20 times) at boot time.  It can be
> trivially done by making this change in this file
> /etc/cirros-init/config (in the disk image):
>
> - DATASOURCE_LIST="nocloud configdrive ec2"
> + DATASOURCE_LIST="nocloud"
>

That's a good tip!

If CirrOS had better support for "nocloud"[1], the existing boot tests could
transparently use it.  For instance, you can currently do this:

$ ./tests/venv/bin/avocado vmimage get --distro=ubuntu --distro-version=20.04
The image was downloaded:
Provider Version Architecture File
ubuntu   20.04   amd64
/home/cleber/avocado/data/cache/by_location/ca6ab0fdb5d175bbf3dfc3d070511559f6eab449/ubuntu-20.04-server-cloudimg-amd64.img

$ ./tests/venv/bin/avocado run -p distro=ubuntu -p
distro_version=20.04
tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm

The "-p distro=cirros" works, but only up to the downloading/preparing
the image.
The lack of proper support for cloud-init/nocloud then breaks it. I
would be a bit
reluctant of adding another family of tests or a third way of dealing
with guests
because they implement a custom behavior for something that is supposed
to be so standard at this point (cloud-init / nocloud).

Regards,
- Cleber.

[1] https://github.com/cirros-dev/cirros/issues/67




Re: "make check-acceptance" takes way too long

2022-02-01 Thread Cleber Rosa
On Tue, Feb 1, 2022 at 6:25 AM Alex Bennée  wrote:
>
> We have up to now tried really hard as a project to avoid building and
> hosting our own binaries to avoid theoretical* GPL compliance issues.
> This is why we've ended up relying so much on distros to build and host
> binaries we can use. Most QEMU developers have their own personal zoo of
> kernels and userspaces which they use for testing. I use custom kernels
> with a buildroot user space in initramfs for example. We even use the
> qemu advent calendar for a number of our avocado tests but we basically
> push responsibility for GPL compliance to the individual developers in
> that case.
>
> *theoretical in so far I suspect most people would be happy with a
> reference to an upstream repo/commit and .config even if that is not to
> the letter of the "offer of source code" required for true compliance.
>

Yes, it'd be fine (great, really!) if a lightweight distro (or
kernels/initrd) were to
be maintained and identified as an "official" QEMU pick.  Putting the binaries
in the source tree though, brings all sorts of compliance issues.

The downloading of the images at test "setup time" is still a better approach,
given that tests will simply skip if the download is not possible.

- Cleber.




Re: "make check-acceptance" takes way too long

2022-01-31 Thread Cleber Rosa
On Fri, Jan 21, 2022 at 10:22 AM Daniel P. Berrangé  wrote:
>
> On Fri, Jan 21, 2022 at 12:23:23PM +, Alex Bennée wrote:
> >
> > Peter Maydell  writes:
> >
> > > On Fri, 21 Jan 2022 at 10:50, Markus Armbruster  wrote:
> > >> No objection, but it's no replacement for looking into why these tests
> > >> are so slow.
> > >>
> > >> The #1 reason for things being slow is not giving a damn :)
> > >
> > > See previous messages in the thread -- the test starts a
> > > full-fat guest OS including UEFI boot, and it takes forever to
> > > get to the login prompt because systemd is starting everything
> > > including the kitchen sink.
> >
> > There has to be a half-way house between booting a kernel until it fails
> > to find a rootfs and running a full Ubuntu distro. Maybe just asking
> > systemd to reach "rescue.target" would be enough to show the disks are
> > up and userspace works.
>
> Booting up full OS distros is useful, but at the same time I feel it
> is too much as something to expect developers to do on any kind of
> regular basis.
>

Agreed.  The solution IMO can be as simple as having different "test
job profiles".

> Ideally some decent amount of acceptance testing could be a standard
> part of the 'make check', but that's impossible as long as we're
> downloading large disk images or booting things that are very slow,
> especially so with TCG.
>
> IMHO the ideal scenario would be for us to have a kernel, initrd
> containing just busybox tools for the key arch targets we care
> about. Those could be used with direct kernel boot or stuffed
> into a disk iamge. Either way, they would boot in ~1 second,
> even with TCG, and would be able to execute simple shell scripts
> to test a decent amount of QEMU functionality.
>

I see different use cases here:

A) Testing that QEMU can boot a full distro

For testing purposes, the more different subsystems the "boot" process
depends on, the better.  Currently the "boot_linux.py" tests require the entire
guest boot to complete and have a networking configuration and interaction.

B) Using something as a base OS for scripts (tests) to run on it

Here's where there's the most benefit in having a more lightweight distro
(or kernel + initrd).  But, this requirement will also come in
different "optimal"
sizes for different people.  Some of the existing tests require not
only a Fedora
system, but a given version that has given capabilities.

For a sustainable, framework-like solution, tests should be able to determine
the guest they need with minimal setup from test writers[1].  If a Fedora-like
system is not needed, maybe a lightweight system like CirrOS[2] is enough.
CirrOS, unfortunately, can not be used Today as the distro in most of the
acceptance tests because the cloud-init mechanism used to configure the
networking is not currently supported, although there have been discussions
to consider implementing it[3].

> It wouldn't eliminate the need to test with full OS, but it
> would let us have some acceptance testing run as standard with
> 'make check' in a decently fast time.  It would then be less
> critical if the more thorough full OS tests were somewhat
> slower than we'd like. We could just leave those as a scheduled
> job to run overnight post-merge. If they do detect any problems
> post-merge, then write a dedicated test scenario to replicate it
> under the minimal kernel/initrd acceptance test so it'll be
> caught pre-merge in future.
>

Assuming this is about "Testing that QEMU can boot a full distro", I wouldn't
try to solve the problem by making the distro too slim to get to the
point of becoming
an unrealistic system.

IMO the deal breaker with regards to test time can be solved more cheaply by
having and using KVM where these tests will run, and not running them by
default otherwise.  With the tagging mechanism we should be able to set a
condition such as: "If using TCG, exclude tests that boot a full blown distro.
If using KVM, do not criticize what gets booted".  Resulting in something
like:

$ avocado list -t accel:tcg,boots:-distro -t accel:kvm
~/src/qemu/tests/avocado/{boot_linux.py,boot_linux_console.py}
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_i440fx_kvm
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux.py:BootLinuxX8664.test_pc_q35_kvm
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux.py:BootLinuxAarch64.test_virt_kvm
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux_console.py:BootLinuxConsole.test_aarch64_virt
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux_console.py:BootLinuxConsole.test_aarch64_xlnx_versal_virt
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux_console.py:BootLinuxConsole.test_arm_virt
avocado-instrumented
/home/cleber/src/qemu/tests/avocado/boot_linux_console.py:BootLinuxConsole.test_arm_emcraft_sf2
avocado-instrumented

Re: [PATCH] python: pin setuptools below v60.0.0

2022-01-21 Thread Cleber Rosa


John Snow  writes:

> setuptools is a package that replaces the python stdlib 'distutils'. It
> is generally installed by all venv-creating tools "by default". It isn't
> actually needed at runtime for the qemu package, so our own setup.cfg
> does not mention it as a dependency.
>
> However, tox will create virtual environments that include it, and will
> upgrade it to the very latest version. the 'venv' tool will also include
> whichever version your host system happens to have.
>
> Unfortunately, setuptools version 60.0.0 and above include a hack to
> forcibly overwrite python's built-in distutils. The pylint tool that we
> use to run code analysis checks on this package relies on distutils and
> suffers regressions when setuptools >= 60.0.0 is present at all, see
> https://github.com/PyCQA/pylint/issues/5704
>
> Instruct tox and the 'check-dev' targets to avoid setuptools packages
> that are too new, for now. Pipenv is unaffected, because setuptools 60
> does not offer Python 3.6 support, and our pipenv config is pinned
> against Python 3.6.
>
> Signed-off-by: John Snow 
> ---
>  python/Makefile  | 2 ++
>  python/setup.cfg | 1 +
>  2 files changed, 3 insertions(+)
>

Reviewed-by: Cleber Rosa 
Tested-by: Cleber Rosa 




Give the fosshost.org VM a purpose or a retirement

2021-11-24 Thread Cleber Rosa
Hi,

Fosshost.org was kind enough to supply the QEMU project with a public
VM hosted by them.  The original use case we anticipated was to set up
a GitLab CI runner, because we assumed the VM was KVM capable, but
that turned out not to be the case.

So, at this point, adding it as a GitLab CI runner would not add any
significant improvement over the shared runners already provided, and
it would require more maintenance effort.

If there are any ideas for making use of this resource, and volunteers
to configure and maintain it, please let me know.

Otherwise, it seems fair to relinquish the resource back to Fosshost.org.

This is also related to: https://gitlab.com/qemu-project/qemu-web/-/issues/2

Thanks,
- Cleber.




[PATCH v2 1/1] Jobs based on custom runners: add CentOS Stream 8

2021-11-11 Thread Cleber Rosa
This introduces three different parts of a job designed to run
on a custom runner managed by Red Hat.  The goals include:

  a) propose a model for other organizations that want to onboard
 their own runners, with their specific platforms, build
 configuration and tests.

  b) bring awareness to the differences between upstream QEMU and the
 version available under CentOS Stream, which is "A preview of
 upcoming Red Hat Enterprise Linux minor and major releases".

  c) because of b), it should be easier to identify and reduce the gap
 between Red Hat's downstream and upstream QEMU.

The components of this custom job are:

  I) OS build environment setup code:

 - additions to the existing "build-environment.yml" playbook
   that can be used to set up CentOS/EL 8 systems.

 - a CentOS Stream 8 specific "build-environment.yml" playbook
   that adds to the generic one.

 II) QEMU build configuration: a script that will produce binaries with
 features as similar as possible to the ones built and packaged on
 CentOS stream 8.

III) Scripts that define the minimum amount of testing that the
 binaries built with the given configuration (point II) under the
 given OS build environment (point I) should be subjected to.

 IV) Job definition: GitLab CI jobs that will dispatch the build/test
 jobs (see points #II and #III) to the machine specifically
 configured according to #I.

Signed-off-by: Cleber Rosa 
---
 .gitlab-ci.d/custom-runners.yml   |  29 +++
 docs/devel/ci-jobs.rst.inc|   7 +
 .../org.centos/stream/8/build-environment.yml |  51 +
 .../ci/org.centos/stream/8/x86_64/configure   | 208 ++
 .../org.centos/stream/8/x86_64/test-avocado   |  70 ++
 scripts/ci/org.centos/stream/README   |  17 ++
 scripts/ci/setup/build-environment.yml|  38 
 7 files changed, 420 insertions(+)
 create mode 100644 scripts/ci/org.centos/stream/8/build-environment.yml
 create mode 100755 scripts/ci/org.centos/stream/8/x86_64/configure
 create mode 100755 scripts/ci/org.centos/stream/8/x86_64/test-avocado
 create mode 100644 scripts/ci/org.centos/stream/README

diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml
index a89a20da48..1f56297dfa 100644
--- a/.gitlab-ci.d/custom-runners.yml
+++ b/.gitlab-ci.d/custom-runners.yml
@@ -248,3 +248,32 @@ ubuntu-20.04-aarch64-notcg:
  - ../configure --disable-libssh --disable-tcg
  - make --output-sync -j`nproc`
  - make --output-sync -j`nproc` check V=1
+
+centos-stream-8-x86_64:
+ allow_failure: true
+ needs: []
+ stage: build
+ tags:
+ - centos_stream_8
+ - x86_64
+ rules:
+ - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ 
/^staging/'
+ - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
+ artifacts:
+   name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+   when: on_failure
+   expire_in: 7 days
+   paths:
+ - build/tests/results/latest/results.xml
+ - build/tests/results/latest/test-results
+   reports:
+ junit: build/tests/results/latest/results.xml
+ before_script:
+ - JOBS=$(expr $(nproc) + 1)
+ script:
+ - mkdir build
+ - cd build
+ - ../scripts/ci/org.centos/stream/8/x86_64/configure
+ - make -j"$JOBS"
+ - make NINJA=":" check
+ - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado
diff --git a/docs/devel/ci-jobs.rst.inc b/docs/devel/ci-jobs.rst.inc
index 277975e4ad..db3f571d5f 100644
--- a/docs/devel/ci-jobs.rst.inc
+++ b/docs/devel/ci-jobs.rst.inc
@@ -49,3 +49,10 @@ S390X_RUNNER_AVAILABLE
 If you've got access to an IBM Z host that can be used as a gitlab-CI
 runner, you can set this variable to enable the tests that require this
 kind of host. The runner should be tagged with "s390x".
+
+CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE
+~~~
+If you've got access to a CentOS Stream 8 x86_64 host that can be
+used as a gitlab-CI runner, you can set this variable to enable the
+tests that require this kind of host. The runner should be tagged with
+both "centos_stream_8" and "x86_64".
diff --git a/scripts/ci/org.centos/stream/8/build-environment.yml 
b/scripts/ci/org.centos/stream/8/build-environment.yml
new file mode 100644
index 00..42b0471634
--- /dev/null
+++ b/scripts/ci/org.centos/stream/8/build-environment.yml
@@ -0,0 +1,51 @@
+---
+- name: Installation of extra packages to build QEMU
+  hosts: all
+  tasks:
+- name: Extra check for CentOS Stream 8
+  lineinfile:
+path: /etc/redhat-release
+line: CentOS Stream release 8
+state: present
+  check_mode: yes
+  register: centos_stream_8
+
+- name: Enable PowerTools repo on CentOS Stream 8
+  ini_file:
+path: /etc/yum.repos.d/CentOS-Stream-PowerTools.repo
+section: powertools
+option: enabled
+value: "1"

[PATCH v2 0/1] Jobs based on custom runners: add CentOS Stream 8

2021-11-11 Thread Cleber Rosa
This adds a new custom runner, showing an example of how other
entities can add their own custom jobs to the GitLab CI pipeline.

The runner (the machine and job) is to be managed by Red Hat, and
adds, at the very least, bare metal x86_64 KVM testing capabilities to
the QEMU pipeline.  This brings extra coverage for some unittests, and
the ability to run the Avocado tests that depend on KVM.

The runner is already completely set up and registered to the
https://gitlab.com/qemu-project/qemu project instance.  Jobs will be
triggered according to the same rules for the jobs s390x and aarch64
jobs running on QEMU project's custom runners, that is, pushes to the
staging branch of the "qemu-project" project, or by setting a specific
variable.

Still, the job is set with mode "allow failures", so it should not
disrupt the existing pipeline.  Once its reliability is proved (rules
and service levels are to be determined), it can be "upgraded" to
a "gating" condition.

Even though the formal method of tracking machine/job maintainers have
not been formalized, it should be known that the contacts/admins for
this machine and job are:

 - Willian Rampazzo
   
   willianr on #qemu

 - Cleber Rosa
   
   clebergnu on #qemu

One example of a job introduced here, running on the host reserved for
this purpose can be seen at:

 - https://gitlab.com/cleber.gnu/qemu/-/jobs/1773761640

Changes from v1[1]:

 * Replaced "--disable-fdt" for "--enable-fdt", given that according
   to "TARGET_NEED_FDT=y" in "configs/targets/x86_64-softmmu.mak" it
   is required for x86_64-softmmu.

 * Added libfdt-devel to list of package requirements (see previous
   point for reasoning).

 * Removed patch 1 that contained a duplicate bug fix.

 * Removed patches 2 and 3 that implemented a "feature probe" and
   "feature requirement" that would cancel tests if features were not
   present.  That will be treated in a different patch series.

 * Removed --disable-jemalloc and --disable-tcmalloc according to
   3b4da1329.

 * Introduced "test-avocado" script with a list of vetted tests

 * Do not install meson from CentOS Stream 8 PowerTools repo; instead,
   use meson from the git submodule due to minimum version requirements.

 * Sync with commit f68d21ab8eac56c4097a3d63a8c86689bb507911 (HEAD of
   c8s-stream-rhel branch) from CentOS repo at
   https://git.centos.org/rpms/qemu-kvm/.

 * Further separated distribution version and architecture specific
   files into separate sub directories.

 * Added a gitlab CI rule and variable to allow other repos/users who
   have a CentOS Stream 8 x86_64 runner to trigger the job.

[1] https://lists.gnu.org/archive/html/qemu-devel/2021-06/msg02066.html

Cleber Rosa (1):
  Jobs based on custom runners: add CentOS Stream 8

 .gitlab-ci.d/custom-runners.yml   |  29 +++
 docs/devel/ci-jobs.rst.inc|   7 +
 .../org.centos/stream/8/build-environment.yml |  51 +
 .../ci/org.centos/stream/8/x86_64/configure   | 208 ++
 .../org.centos/stream/8/x86_64/test-avocado   |  70 ++
 scripts/ci/org.centos/stream/README   |  17 ++
 scripts/ci/setup/build-environment.yml|  38 
 7 files changed, 420 insertions(+)
 create mode 100644 scripts/ci/org.centos/stream/8/build-environment.yml
 create mode 100755 scripts/ci/org.centos/stream/8/x86_64/configure
 create mode 100755 scripts/ci/org.centos/stream/8/x86_64/test-avocado
 create mode 100644 scripts/ci/org.centos/stream/README

-- 
2.33.1





[PATCH 14/16] tests/acceptance/ppc_prep_40p.py: NetBSD 7.1.2 location update

2021-09-24 Thread Cleber Rosa
The NetBSD-7.1.2-prep.iso is no longer available on the CDN, but it's
still available in the archive.

Let's update its location so that users without the file on cache can
still fetch it and run the test.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/ppc_prep_40p.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/acceptance/ppc_prep_40p.py
index 2993ee3b07..6b28a69ea5 100644
--- a/tests/acceptance/ppc_prep_40p.py
+++ b/tests/acceptance/ppc_prep_40p.py
@@ -67,8 +67,8 @@ def test_openbios_and_netbsd(self):
 :avocado: tags=machine:40p
 :avocado: tags=os:netbsd
 """
-drive_url = ('https://cdn.netbsd.org/pub/NetBSD/iso/7.1.2/'
- 'NetBSD-7.1.2-prep.iso')
+drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
+ 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso')
 drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e'
 drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash,
   algorithm='md5')
-- 
2.31.1




[PATCH 16/16] tests/acceptance/ppc_prep_40p.py: unify tags

2021-09-24 Thread Cleber Rosa
The arch and machine tags apply to all tests, so let's define them
only once.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/ppc_prep_40p.py | 12 
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/acceptance/ppc_prep_40p.py
index 5e61e686bd..d1e5674673 100644
--- a/tests/acceptance/ppc_prep_40p.py
+++ b/tests/acceptance/ppc_prep_40p.py
@@ -13,6 +13,10 @@
 
 
 class IbmPrep40pMachine(Test):
+"""
+:avocado: tags=arch:ppc
+:avocado: tags=machine:40p
+"""
 
 timeout = 60
 
@@ -24,8 +28,6 @@ class IbmPrep40pMachine(Test):
 @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
 def test_factory_firmware_and_netbsd(self):
 """
-:avocado: tags=arch:ppc
-:avocado: tags=machine:40p
 :avocado: tags=os:netbsd
 :avocado: tags=slowness:high
 """
@@ -48,10 +50,6 @@ def test_factory_firmware_and_netbsd(self):
 wait_for_console_pattern(self, 'Model: IBM PPS Model 6015')
 
 def test_openbios_192m(self):
-"""
-:avocado: tags=arch:ppc
-:avocado: tags=machine:40p
-"""
 self.vm.set_console()
 self.vm.add_args('-m', '192') # test fw_cfg
 
@@ -62,8 +60,6 @@ def test_openbios_192m(self):
 
 def test_openbios_and_netbsd(self):
 """
-:avocado: tags=arch:ppc
-:avocado: tags=machine:40p
 :avocado: tags=os:netbsd
 """
 drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
-- 
2.31.1




[PATCH 13/16] tests/acceptance/boot_xen.py: use class attribute

2021-09-24 Thread Cleber Rosa
Rather than defining a single use variable, let's just use the class
attribute directly.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_xen.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/acceptance/boot_xen.py b/tests/acceptance/boot_xen.py
index 66621fd14d..8eec465414 100644
--- a/tests/acceptance/boot_xen.py
+++ b/tests/acceptance/boot_xen.py
@@ -49,11 +49,10 @@ def launch_xen(self, xen_path):
 
 self.vm.set_console()
 
-xen_command_line = self.XEN_COMMON_COMMAND_LINE
 self.vm.add_args('-machine', 'virtualization=on',
  '-m', '768',
  '-kernel', xen_path,
- '-append', xen_command_line,
+ '-append', self.XEN_COMMON_COMMAND_LINE,
  '-device',
  
'guest-loader,addr=0x4700,kernel=%s,bootargs=console=hvc0'
  % (self.kernel_path))
-- 
2.31.1




[PATCH 04/16] Acceptance Tests: keep track and disable tests with known issues

2021-09-24 Thread Cleber Rosa
This introduces a convention, under which tests with known open
issues, will be automatically excluded from execution via "make
check-acceptance".  It should by itself improve the reliability
of test jobs, even those with rare failures.

Those tests can still be run with:

  ./tests/venv/bin/avocado run $TEST_ID

With the test identification are obtainable via:

  ./tests/venv/bin/avocado list tests/acceptance

And for tests with known issues:

  ./tests/venv/bin/avocado list -t issue tests/acceptance

A list of tests that are enabled (that is, match the currently
configured targets, and do not have known issues) can be obtained
with:

  make list-acceptance

As a future improvement, a tool can be added to sync the "issue" tags
with issues that end up being resolved and closed.

Signed-off-by: Cleber Rosa 
---
 docs/devel/testing.rst | 28 ++
 tests/Makefile.include |  3 ++-
 tests/acceptance/boot_linux_console.py |  3 +++
 tests/acceptance/machine_rx_gdbsim.py  |  3 +++
 4 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst
index 2818c58ff8..f93a9bed3f 100644
--- a/docs/devel/testing.rst
+++ b/docs/devel/testing.rst
@@ -1147,6 +1147,34 @@ variable as shown on the code snippet below to skip the 
test:
   def test(self):
   do_something()
 
+Disabling tests
+---
+
+If you want to have a test temporarily disabled (from ``make
+check-acceptance``), you can add an Avocado ``issue`` tag to the test.  
Example::
+
+  class Feature(Test):
+
+  def test(self):
+"""
+:avocado: tags=issue
+"""
+do_something()
+
+Rather than simply adding an ``issue`` tag, it may be helpful to point
+to an actual issue on the project's issue tracker::
+
+  class Feature(Test):
+
+  def test(self):
+"""
+Reason for disabling this test is documented and tracked at
+https://gitlab.com/qemu-project/qemu/-/issues/
+
+:avocado: tags=issue:
+"""
+do_something()
+
 Uninstalling Avocado
 
 
diff --git a/tests/Makefile.include b/tests/Makefile.include
index d1f90572a7..eb3e5d2aa6 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -94,7 +94,8 @@ TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
 # information please refer to "avocado --help".
 AVOCADO_SHOW=app
 AVOCADO_RUNNER=runner
-AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGETS)))
+comma := ,
+AVOCADO_TAGS=$(patsubst %-softmmu,--filter-by-tags=-issue$(comma)arch:%, 
$(filter %-softmmu,$(TARGETS)))
 
 $(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
$(call quiet-command, \
diff --git a/tests/acceptance/boot_linux_console.py 
b/tests/acceptance/boot_linux_console.py
index 0a49c0e276..4c824bc161 100644
--- a/tests/acceptance/boot_linux_console.py
+++ b/tests/acceptance/boot_linux_console.py
@@ -485,6 +485,9 @@ def test_arm_raspi2_initrd(self):
 """
 :avocado: tags=arch:arm
 :avocado: tags=machine:raspi2b
+
+# https://gitlab.com/qemu-project/qemu/-/issues/636
+:avocado: tags=issue:636
 """
 deb_url = ('http://archive.raspberrypi.org/debian/'
'pool/main/r/raspberrypi-firmware/'
diff --git a/tests/acceptance/machine_rx_gdbsim.py 
b/tests/acceptance/machine_rx_gdbsim.py
index 32b737b6d8..62893a9989 100644
--- a/tests/acceptance/machine_rx_gdbsim.py
+++ b/tests/acceptance/machine_rx_gdbsim.py
@@ -54,6 +54,9 @@ def test_linux_sash(self):
 :avocado: tags=arch:rx
 :avocado: tags=machine:gdbsim-r5f562n7
 :avocado: tags=endian:little
+
+# https://gitlab.com/qemu-project/qemu/-/issues/507
+:avocado: tags=issue:507
 """
 dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb')
 dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18'
-- 
2.31.1




[PATCH 12/16] tests/acceptance/boot_xen.py: removed unused import

2021-09-24 Thread Cleber Rosa
Just a clean up for an unused import.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_xen.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/acceptance/boot_xen.py b/tests/acceptance/boot_xen.py
index cd916ddba4..66621fd14d 100644
--- a/tests/acceptance/boot_xen.py
+++ b/tests/acceptance/boot_xen.py
@@ -13,7 +13,6 @@
 
 import os
 
-from avocado import skipIf
 from avocado_qemu import wait_for_console_pattern
 from boot_linux_console import LinuxKernelTest
 
-- 
2.31.1




[PATCH 02/16] Acceptance Tests: improve check-acceptance description

2021-09-24 Thread Cleber Rosa
The "check-acceptance" make rule won't necessarily run *all* available
tests, because it employs a filter based on the currently configured
targets.  This change in the description of the rule makes that
behavior extra clear.

Signed-off-by: Cleber Rosa 
---
 tests/Makefile.include | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index ac289a2e41..2c03256ae8 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -16,7 +16,7 @@ ifneq ($(filter $(all-check-targets), check-softfloat),)
@echo " $(MAKE) check-tcgRun TCG tests"
@echo " $(MAKE) check-softfloat  Run FPU emulation tests"
 endif
-   @echo " $(MAKE) check-acceptance Run all acceptance (functional) 
tests"
+   @echo " $(MAKE) check-acceptance Run acceptance (functional) tests 
for currently configured targets"
@echo
@echo " $(MAKE) check-report.tap Generates an aggregated TAP test 
report"
@echo " $(MAKE) check-venv   Creates a Python venv for tests"
-- 
2.31.1




[PATCH 15/16] tests/acceptance/ppc_prep_40p.py: clean up unused import

2021-09-24 Thread Cleber Rosa
Just a removal of an unused imported symbol.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/ppc_prep_40p.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/acceptance/ppc_prep_40p.py
index 6b28a69ea5..5e61e686bd 100644
--- a/tests/acceptance/ppc_prep_40p.py
+++ b/tests/acceptance/ppc_prep_40p.py
@@ -7,7 +7,6 @@
 
 import os
 
-from avocado import skipIf
 from avocado import skipUnless
 from avocado_qemu import Test
 from avocado_qemu import wait_for_console_pattern
-- 
2.31.1




[PATCH 11/16] tests/acceptance/boot_xen.py: fetch kernel during test setUp()

2021-09-24 Thread Cleber Rosa
The kernel is a common blob used in all tests.  By moving it to the
setUp() method, the "fetch asset" plugin will recognize the kernel and
attempt to fetch it and cache it before the tests are started.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_xen.py | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/tests/acceptance/boot_xen.py b/tests/acceptance/boot_xen.py
index e2c59f6592..cd916ddba4 100644
--- a/tests/acceptance/boot_xen.py
+++ b/tests/acceptance/boot_xen.py
@@ -31,23 +31,22 @@ class BootXen(LinuxKernelTest):
 timeout = 90
 XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all'
 
-def fetch_guest_kernel(self):
+def setUp(self):
+super(BootXen, self).setUp()
+
 # Using my own built kernel - which works
 kernel_url = ('https://fileserver.linaro.org/'
   's/JSsewXGZ6mqxPr5/download?path=%2F='
   'linux-5.9.9-arm64-ajb')
 kernel_sha1 = '4f92bc4b9f88d5ab792fa7a43a68555d344e1b83'
-kernel_path = self.fetch_asset(kernel_url,
-   asset_hash=kernel_sha1)
-
-return kernel_path
+self.kernel_path = self.fetch_asset(kernel_url,
+asset_hash=kernel_sha1)
 
 def launch_xen(self, xen_path):
 """
 Launch Xen with a dom0 guest kernel
 """
 self.log.info("launch with xen_path: %s", xen_path)
-kernel_path = self.fetch_guest_kernel()
 
 self.vm.set_console()
 
@@ -58,7 +57,7 @@ def launch_xen(self, xen_path):
  '-append', xen_command_line,
  '-device',
  
'guest-loader,addr=0x4700,kernel=%s,bootargs=console=hvc0'
- % (kernel_path))
+ % (self.kernel_path))
 
 self.vm.launch()
 
-- 
2.31.1




[PATCH 10/16] tests/acceptance/boot_xen.py: unify tags

2021-09-24 Thread Cleber Rosa
Because all tests share the same tags, it's possible to have all of
them at the class level.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_xen.py | 26 +-
 1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/tests/acceptance/boot_xen.py b/tests/acceptance/boot_xen.py
index 4c14f9e2e7..e2c59f6592 100644
--- a/tests/acceptance/boot_xen.py
+++ b/tests/acceptance/boot_xen.py
@@ -21,6 +21,11 @@
 class BootXen(LinuxKernelTest):
 """
 Boots a Xen hypervisor with a Linux DomU kernel.
+
+:avocado: tags=arch:aarch64
+:avocado: tags=accel:tcg
+:avocado: tags=cpu:cortex-a57
+:avocado: tags=machine:virt
 """
 
 timeout = 90
@@ -61,13 +66,6 @@ def launch_xen(self, xen_path):
 wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
 
 def test_arm64_xen_411_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
 xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
'download?path=%2F='
@@ -79,13 +77,6 @@ def test_arm64_xen_411_and_dom0(self):
 self.launch_xen(xen_path)
 
 def test_arm64_xen_414_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
 xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
'download?path=%2F='
@@ -97,13 +88,6 @@ def test_arm64_xen_414_and_dom0(self):
 self.launch_xen(xen_path)
 
 def test_arm64_xen_415_and_dom0(self):
-"""
-:avocado: tags=arch:aarch64
-:avocado: tags=accel:tcg
-:avocado: tags=cpu:cortex-a57
-:avocado: tags=machine:virt
-"""
-
 xen_url = ('https://fileserver.linaro.org/'
's/JSsewXGZ6mqxPr5/download'
'?path=%2F=xen-upstream-4.15-unstable.deb')
-- 
2.31.1




[PATCH 07/16] Acceptance Tests: workaround expired mipsdistros.mips.com HTTPS cert

2021-09-24 Thread Cleber Rosa
The certificate at https://mipsdistros.mips.com/ is currently
expired (since Jan 23, 2021).

Instead of failing to obtain the files, let's downgrade to HTTP
instead, given that the integrity of the files is already verified
locally, after the download, using the recorded hash.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_linux_console.py | 6 +++---
 tests/acceptance/replay_kernel.py  | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/acceptance/boot_linux_console.py 
b/tests/acceptance/boot_linux_console.py
index 80e3a2f7a6..bc0caa1099 100644
--- a/tests/acceptance/boot_linux_console.py
+++ b/tests/acceptance/boot_linux_console.py
@@ -278,7 +278,7 @@ def test_mips_malta32el_nanomips_4k(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page4k.xz')
 kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
@@ -291,7 +291,7 @@ def test_mips_malta32el_nanomips_16k_up(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page16k_up.xz')
 kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
@@ -304,7 +304,7 @@ def test_mips_malta32el_nanomips_64k_dbg(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page64k_dbg.xz')
 kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
diff --git a/tests/acceptance/replay_kernel.py 
b/tests/acceptance/replay_kernel.py
index cfca71e65f..cac795ab4f 100644
--- a/tests/acceptance/replay_kernel.py
+++ b/tests/acceptance/replay_kernel.py
@@ -434,7 +434,7 @@ def test_mips_malta32el_nanomips_4k(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page4k.xz')
 kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
@@ -448,7 +448,7 @@ def test_mips_malta32el_nanomips_16k_up(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page16k_up.xz')
 kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
@@ -462,7 +462,7 @@ def test_mips_malta32el_nanomips_64k_dbg(self):
 :avocado: tags=endian:little
 :avocado: tags=cpu:I7200
 """
-kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/'
+kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/'
   'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/'
   'generic_nano32r6el_page64k_dbg.xz')
 kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180'
-- 
2.31.1




[PATCH 09/16] tests/acceptance/boot_xen.py: merge base classes

2021-09-24 Thread Cleber Rosa
While it's a good practice to have reusable base classes, in this
specific case there's no other user of the BootXenBase class.

By unifying the class used in this test, we can improve readability
and have the opportunity to add some future improvements in a clearer
fashion.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_xen.py | 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tests/acceptance/boot_xen.py b/tests/acceptance/boot_xen.py
index b5860b7d88..4c14f9e2e7 100644
--- a/tests/acceptance/boot_xen.py
+++ b/tests/acceptance/boot_xen.py
@@ -18,7 +18,7 @@
 from boot_linux_console import LinuxKernelTest
 
 
-class BootXenBase(LinuxKernelTest):
+class BootXen(LinuxKernelTest):
 """
 Boots a Xen hypervisor with a Linux DomU kernel.
 """
@@ -60,9 +60,6 @@ def launch_xen(self, xen_path):
 console_pattern = 'VFS: Cannot open root device'
 wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:")
 
-
-class BootXen(BootXenBase):
-
 def test_arm64_xen_411_and_dom0(self):
 """
 :avocado: tags=arch:aarch64
-- 
2.31.1




[PATCH 03/16] Acceptance Tests: add mechanism for listing tests

2021-09-24 Thread Cleber Rosa
It is helpful to know the tests that would be executed with a "make
check-acceptance" without executing them.  Let's introduce a "make
list-acceptance" rule for that purpose.

Signed-off-by: Cleber Rosa 
---
 tests/Makefile.include | 8 
 1 file changed, 8 insertions(+)

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 2c03256ae8..d1f90572a7 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -17,6 +17,7 @@ ifneq ($(filter $(all-check-targets), check-softfloat),)
@echo " $(MAKE) check-softfloat  Run FPU emulation tests"
 endif
@echo " $(MAKE) check-acceptance Run acceptance (functional) tests 
for currently configured targets"
+   @echo " $(MAKE) list-acceptance  List acceptance (functional) tests 
for currently configured targets"
@echo
@echo " $(MAKE) check-report.tap Generates an aggregated TAP test 
report"
@echo " $(MAKE) check-venv   Creates a Python venv for tests"
@@ -135,6 +136,13 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) 
get-vm-images
 $(if $(GITLAB_CI),,--failfast) tests/acceptance, \
 "AVOCADO", "tests/acceptance")
 
+list-acceptance: check-venv
+   $(call quiet-command, \
+$(TESTS_VENV_DIR)/bin/python -m avocado list \
+--filter-by-tags-include-empty --filter-by-tags-include-empty-key \
+$(AVOCADO_TAGS) tests/acceptance, \
+"AVOCADO", "list tests/acceptance")
+
 # Consolidated targets
 
 .PHONY: check-block check check-clean get-vm-images
-- 
2.31.1




[PATCH 06/16] Acceptance Tests: use extract from package from avocado.utils

2021-09-24 Thread Cleber Rosa
There's code in avocado.utils to handle package extractions in various
(currently rpm, and deb) formats.  Let's use it.

Reference: 
https://avocado-framework.readthedocs.io/en/91.0/api/utils/avocado.utils.software_manager.html#avocado.utils.software_manager.SoftwareManager.extract_from_package
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/boot_linux_console.py | 84 ++
 tests/acceptance/boot_xen.py   |  6 +-
 tests/acceptance/replay_kernel.py  | 12 ++--
 tests/acceptance/tcg_plugins.py|  2 +-
 4 files changed, 41 insertions(+), 63 deletions(-)

diff --git a/tests/acceptance/boot_linux_console.py 
b/tests/acceptance/boot_linux_console.py
index 4c824bc161..80e3a2f7a6 100644
--- a/tests/acceptance/boot_linux_console.py
+++ b/tests/acceptance/boot_linux_console.py
@@ -23,6 +23,7 @@
 from avocado.utils import process
 from avocado.utils import archive
 from avocado.utils.path import find_command, CmdNotFoundError
+from avocado.utils.software_manager import SoftwareManager
 
 P7ZIP_AVAILABLE = True
 try:
@@ -54,42 +55,19 @@ def wait_for_console_pattern(self, success_message, 
vm=None):
  failure_message='Kernel panic - not syncing',
  vm=vm)
 
-def extract_from_deb(self, deb, path):
+def extract_from_pkg(self, pkg, path):
 """
-Extracts a file from a deb package into the test workdir
+Extracts a file from a DEB or RPM package into the test workdir
 
-:param deb: path to the deb archive
-:param path: path within the deb archive of the file to be extracted
+:param pkg: path to the DEB or RPM package
+:param path: path within the DEB or RPM archive of the file to
+  be extracted
 :returns: path of the extracted file
 """
-cwd = os.getcwd()
-os.chdir(self.workdir)
-file_path = process.run("ar t %s" % deb).stdout_text.split()[2]
-process.run("ar x %s %s" % (deb, file_path))
-archive.extract(file_path, self.workdir)
-os.chdir(cwd)
-# Return complete path to extracted file.  Because callers to
-# extract_from_deb() specify 'path' with a leading slash, it is
-# necessary to use os.path.relpath() as otherwise os.path.join()
-# interprets it as an absolute path and drops the self.workdir part.
+SoftwareManager.extract_from_package(pkg, self.workdir)
 return os.path.normpath(os.path.join(self.workdir,
  os.path.relpath(path, '/')))
 
-def extract_from_rpm(self, rpm, path):
-"""
-Extracts a file from an RPM package into the test workdir.
-
-:param rpm: path to the rpm archive
-:param path: path within the rpm archive of the file to be extracted
- needs to be a relative path (starting with './') because
- cpio(1), which is used to extract the file, expects that.
-:returns: path of the extracted file
-"""
-cwd = os.getcwd()
-os.chdir(self.workdir)
-process.run("rpm2cpio %s | cpio -id %s" % (rpm, path), shell=True)
-os.chdir(cwd)
-return os.path.normpath(os.path.join(self.workdir, path))
 
 class BootLinuxConsole(LinuxKernelTest):
 """
@@ -128,7 +106,7 @@ def test_mips_malta(self):
'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb')
 deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04'
 deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
-kernel_path = self.extract_from_deb(deb_path,
+kernel_path = self.extract_from_pkg(deb_path,
 '/boot/vmlinux-2.6.32-5-4kc-malta')
 
 self.vm.set_console()
@@ -160,7 +138,7 @@ def test_mips64el_malta(self):
'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb')
 deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5'
 deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
-kernel_path = self.extract_from_deb(deb_path,
+kernel_path = self.extract_from_pkg(deb_path,
 '/boot/vmlinux-2.6.32-5-5kc-malta')
 
 self.vm.set_console()
@@ -181,7 +159,7 @@ def test_mips64el_fuloong2e(self):

'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb')
 deb_hash = 'd04d446045deecf7b755ef576551de0c4184dd44'
 deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
-kernel_path = self.extract_from_deb(deb_path,
+kernel_path = self.extract_from_pkg(deb_path,
 
'/boot/vmlinux-3.16.0-6-loongson-2e')
 
 self.vm.set_console()
@@ -203,7 +181,7 @@ def test_mips_malta_cpio(self):
'linux

[PATCH 01/16] Acceptance Tests: bump Avocado requirement to 91.0

2021-09-24 Thread Cleber Rosa
Avocado 91.0 brings, among other changes, a switch to a new runner
implementation, known as "nrunner".  While my personal testing shows
that it's suitable for running the QEMU tests, there are some
considerations that should be addressed before the change.

For instance, the "nrunner" implementation will run tests in parallel
by default.  With more tests running, and resources shared, some tests
may fail with timeouts on some environments.

So, for now, let's bump the Avocado version to allow for the other
features to be used.  And the "nrunner" implementation to be used by
those that choose to do so.

More information can be found at the release notes at:
   https://avocado-framework.readthedocs.io/en/latest/releases/91_0.html

Signed-off-by: Cleber Rosa 
---
 docs/devel/testing.rst | 12 
 tests/Makefile.include |  2 ++
 tests/requirements.txt |  2 +-
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst
index 4a0abbf23d..2818c58ff8 100644
--- a/docs/devel/testing.rst
+++ b/docs/devel/testing.rst
@@ -740,6 +740,18 @@ may be invoked by running:
 
   tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/acceptance/
 
+Running tests in parallel
+-
+
+The current ``make check-acceptance`` target continues to use the
+Avocado runner implementation simply known as "runner".  But, it's now
+possible to opt-in and choose the "nrunner" implementation, which,
+among other things, allows for parallel execution of tests:
+
+.. code::
+
+  make AVOCADO_RUNNER=nrunner check-acceptance
+
 Manual Installation
 ---
 
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 6e16c05f10..ac289a2e41 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -92,6 +92,7 @@ TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results
 # Any number of command separated loggers are accepted.  For more
 # information please refer to "avocado --help".
 AVOCADO_SHOW=app
+AVOCADO_RUNNER=runner
 AVOCADO_TAGS=$(patsubst %-softmmu,-t arch:%, $(filter %-softmmu,$(TARGETS)))
 
 $(TESTS_VENV_DIR): $(TESTS_VENV_REQ)
@@ -128,6 +129,7 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) 
get-vm-images
$(call quiet-command, \
 $(TESTS_VENV_DIR)/bin/python -m avocado \
 --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
+--test-runner=$(AVOCADO_RUNNER) \
 --filter-by-tags-include-empty --filter-by-tags-include-empty-key \
 $(AVOCADO_TAGS) \
 $(if $(GITLAB_CI),,--failfast) tests/acceptance, \
diff --git a/tests/requirements.txt b/tests/requirements.txt
index a21b59b443..40af24c664 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,5 +1,5 @@
 # Add Python module requirements, one per line, to be installed
 # in the tests/venv Python virtual environment. For more info,
 # refer to: https://pip.pypa.io/en/stable/user_guide/#id1
-avocado-framework==88.1
+avocado-framework==91.0
 pycdlib==1.11.0
-- 
2.31.1




[PATCH 08/16] acceptance/tests/vnc.py: use explicit syntax for enabling passwords

2021-09-24 Thread Cleber Rosa
This matches the command line on 82a17d1d67, where the "on" or "off"
should be explicitly given.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/vnc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/acceptance/vnc.py b/tests/acceptance/vnc.py
index 22656bbcc2..f301fbb4f5 100644
--- a/tests/acceptance/vnc.py
+++ b/tests/acceptance/vnc.py
@@ -45,7 +45,7 @@ def test_change_password_requires_a_password(self):
  'Could not set password')
 
 def test_change_password(self):
-self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password')
+self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password=on')
 self.vm.launch()
 self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled'])
 set_password_response = self.vm.qmp('change-vnc-password',
-- 
2.31.1




[PATCH 00/16] Acceptance Tests: use Avocado 91.0 features and other improvements

2021-09-24 Thread Cleber Rosa
This is a collection of patches for the Acceptance Tests to leverage
some of the features of Avocado 91.0.  With the Avocado version bump
by itself, there would be a change in the default "test runner"
implementation that Avocado uses, from the one simply known as
"runner" to the new one called "nrunner".

Among the changes from one implementation to the other, is the fact
that "nrunner" will run tests in parallel by default.  This is *not
yet* enabled by default on "make check-acceptance", but users can
choose to use simply by setting the "AVOCADO_RUNNER" variable, that
is:

  make AVOCADO_RUNNER=nrunner check-acceptance

If you are curious about the architectural differences of the nrunner,
please refer to:

  
https://avocado-framework.readthedocs.io/en/91.0/guides/contributor/chapters/runners.html

One other noteworthy proposal is a convention to tag tests that either
have known issues, or that touch on QEMU features that have known
issues.  By tagging those tests accordingly, they will be
automatically excluded from the regular execution with "make
check-acceptance".

Finally, some updates to assets locations and some tests refactors and
cleanups.

Cleber Rosa (16):
  Acceptance Tests: bump Avocado requirement to 91.0
  Acceptance Tests: improve check-acceptance description
  Acceptance Tests: add mechanism for listing tests
  Acceptance Tests: keep track and disable tests with known issues
  Acceptance Tests: add standard clean up at test tearDown()
  Acceptance Tests: use extract from package from avocado.utils
  Acceptance Tests: workaround expired mipsdistros.mips.com HTTPS cert
  acceptance/tests/vnc.py: use explicit syntax for enabling passwords
  tests/acceptance/boot_xen.py: merge base classes
  tests/acceptance/boot_xen.py: unify tags
  tests/acceptance/boot_xen.py: fetch kernel during test setUp()
  tests/acceptance/boot_xen.py: removed unused import
  tests/acceptance/boot_xen.py: use class attribute
  tests/acceptance/ppc_prep_40p.py: NetBSD 7.1.2 location update
  tests/acceptance/ppc_prep_40p.py: clean up unused import
  tests/acceptance/ppc_prep_40p.py: unify tags

 docs/devel/testing.rst| 40 ++
 tests/Makefile.include| 15 +++-
 tests/acceptance/avocado_qemu/__init__.py |  1 +
 tests/acceptance/boot_linux_console.py| 93 +--
 tests/acceptance/boot_xen.py  | 54 -
 tests/acceptance/machine_rx_gdbsim.py |  3 +
 tests/acceptance/ppc_prep_40p.py  | 17 ++---
 tests/acceptance/replay_kernel.py | 18 ++---
 tests/acceptance/tcg_plugins.py   |  2 +-
 tests/acceptance/vnc.py   |  2 +-
 tests/requirements.txt|  2 +-
 11 files changed, 128 insertions(+), 119 deletions(-)

-- 
2.31.1





[PATCH 05/16] Acceptance Tests: add standard clean up at test tearDown()

2021-09-24 Thread Cleber Rosa
The avocado.Test class, used as the basis of the avocado_qemu.Test
class, performs a clean up of temporary directories as part of its own
tearDown() implementation.

But the avocado_qemu.Test class is currently missing the same clean
up, as it implemented its own tearDown() method without resorting to
the upper class behavior.

This brings avocado_qemu.Test behavior in sync with the standard
avocado.Test behavior and prevents temporary directories from
cluttering the test results directory (unless instructed to do so with
Avocado's "--keep-tmp" option).

Reported-by: Peter Maydell 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/avocado_qemu/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/acceptance/avocado_qemu/__init__.py 
b/tests/acceptance/avocado_qemu/__init__.py
index 2c4fef3e14..1e807e2e55 100644
--- a/tests/acceptance/avocado_qemu/__init__.py
+++ b/tests/acceptance/avocado_qemu/__init__.py
@@ -276,6 +276,7 @@ def tearDown(self):
 for vm in self._vms.values():
 vm.shutdown()
 self._sd = None
+super(Test, self).tearDown()
 
 def fetch_asset(self, name,
 asset_hash=None, algorithm=None,
-- 
2.31.1




[PATCH 2/2] Acceptance Tests: updates to the MAINTAINERS file

2021-08-03 Thread Cleber Rosa
The tests/acceptance directory is currently lacking a maintainer
entry, even though I've been performing that role (of course with help
from many others).  Thus, its status is, even more now, Maintained.

This also removes the currently broken Trello board link, which was
made unavailable unintentionally by a third party.

Signed-off-by: Cleber Rosa 
---
 MAINTAINERS | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 37b1a8e442..d35b948e8d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3418,11 +3418,11 @@ F: tests/tcg/Makefile
 F: tests/tcg/Makefile.include
 
 Acceptance (Integration) Testing with the Avocado framework
-W: https://trello.com/b/6Qi1pxVn/avocado-qemu
-R: Cleber Rosa 
+M: Cleber Rosa 
 R: Philippe Mathieu-Daudé 
 R: Wainer dos Santos Moschetta 
-S: Odd Fixes
+S: Maintained
+F: tests/Makefile.include
 F: tests/acceptance/
 
 Documentation
-- 
2.31.1




[PATCH 0/2] Acceptance Tests: clean up of temporary dirs and MAINTAINERS entry

2021-08-03 Thread Cleber Rosa
This is a reply to an issue[1] reported by Peter, and while at it, an
update of the MAINTAINERS entry so other people know the status and
where to go for help with regards to the acceptance tests'
infrastructure.

[1] https://lists.gnu.org/archive/html/qemu-devel/2021-08/msg00349.html

Cleber Rosa (2):
  Acceptance Tests: add standard clean up at test tearDown()
  Acceptance Tests: updates to the MAINTAINERS file

 MAINTAINERS   | 6 +++---
 tests/acceptance/avocado_qemu/__init__.py | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)

-- 
2.31.1





[PATCH 1/2] Acceptance Tests: add standard clean up at test tearDown()

2021-08-03 Thread Cleber Rosa
The avocado.Test class, used as the basis of the avocado_qemu.Test
class, performs a clean up of temporary directories as part of its own
tearDown() implementation.

But the avocado_qemu.Test class is currently missing the same clean
up, as it implemented its own tearDown() method without resorting to
the upper class behavior.

This brings avocado_qemu.Test behavior in sync with the standard
avocado.Test behavior and prevents temporary directories from
cluttering the test results directory (unless instructed to do so with
Avocado's "--keep-tmp" option).

Reported-by: Peter Maydell 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/avocado_qemu/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/acceptance/avocado_qemu/__init__.py 
b/tests/acceptance/avocado_qemu/__init__.py
index 2c4fef3e14..1e807e2e55 100644
--- a/tests/acceptance/avocado_qemu/__init__.py
+++ b/tests/acceptance/avocado_qemu/__init__.py
@@ -276,6 +276,7 @@ def tearDown(self):
 for vm in self._vms.values():
 vm.shutdown()
 self._sd = None
+super(Test, self).tearDown()
 
 def fetch_asset(self, name,
 asset_hash=None, algorithm=None,
-- 
2.31.1




Re: 'make check-acceptance' eats lots of disk space and never cleans it up

2021-08-03 Thread Cleber Rosa
On Tue, Aug 3, 2021 at 9:47 AM Peter Maydell  wrote:
>
> On Tue, 3 Aug 2021 at 13:58, Cleber Rosa  wrote:
> >
> > On Tue, Aug 3, 2021 at 8:43 AM Peter Maydell  
> > wrote:
> > >
> > > It looks like 'make check-acceptance' creates directories in
> > > build/clang/tests/results which are huge and which it never
> > > cleans up. For example one of my build directories (configured
> > > just for arm targets) has over 350 'job-[timestamp]' directories,
> > > many of which are 2.5GB or more in size.
> > >
> >
> > Every "job-[timestamp]" directory is the result of an "avocado run"
> > invocation, that is, one "make check-acceptance" command.
> >
> > > I assume most of this is artefacts (disk images etc) needed to
> > > rerun the tests. That's useful to keep around so you can manually
> > > run a test. However, we should be sharing this between runs, not
> > > creating a fresh copy for every time check-acceptance is
> > > run, surely ?
> > >
> >
> > They contain results and files needed for debugging the results of
> > tests, not artefacts needed to re-run them.  Everything that is
> > shareable is in the "~/avocado/data/caches" directory.
>
> This doesn't in practice seem to be the case. Picking a subdirectory
> at random:
>
> ./build/clang/tests/results/job-2021-07-30T11.20-63bd0a6/test-results/tmp_dir4_a3m36o/091-tests_acceptance_machine_sparc64_sun4u.py_Sun4uMachine.test_sparc64_sun4u/day23
>
> This contains (among other things) a vmlinux file which I assume is
> the one we run on the guest. It looks to me like this is a directory
> where we unzipped/untarred a downloaded file with the guest image.
>
> And another:
>
> ./build/clang/tests/results/job-2021-07-30T11.20-63bd0a6/test-results/tmp_dirwowk1bzp/026-tests_acceptance_boot_linux_console.py_BootLinuxConsole.test_arm_cubieboard_initrd/
>
> This seems to contain a rootfilesystem for some test or other,
> with a boot/, lib/, usr/, etc.
>
> These all look like artefacts to me, in the sense that they're
> the same every time.
>
> I notice that all these have 'tmp_dir*' directories in the paths. Is the
> problem just that we're failing to clean up a tempdir in some situations?
>

These are all directories meant to be temporary (the name gives it
away) and meant to be cleaned up.  You actually found a bug in the
"avocado_qemu.Test" class that is *not* calling the base
"avocado.Test" class tearDown().  It's a trivial one liner fix:

---

diff --git a/tests/acceptance/avocado_qemu/__init__.py
b/tests/acceptance/avocado_qemu/__init__.py
index 2c4fef3e14..1e807e2e55 100644
--- a/tests/acceptance/avocado_qemu/__init__.py
+++ b/tests/acceptance/avocado_qemu/__init__.py
@@ -276,6 +276,7 @@ def tearDown(self):
for vm in self._vms.values():
vm.shutdown()
self._sd = None
+super(Test, self).tearDown()

def fetch_asset(self, name,
asset_hash=None, algorithm=None,

---

> thanks
> -- PMM
>

Thanks a lot for spotting that, I'll send a fix to the ML right away.

Best regards,
- Cleber.




Re: 'make check-acceptance' eats lots of disk space and never cleans it up

2021-08-03 Thread Cleber Rosa
On Tue, Aug 3, 2021 at 8:43 AM Peter Maydell  wrote:
>
> It looks like 'make check-acceptance' creates directories in
> build/clang/tests/results which are huge and which it never
> cleans up. For example one of my build directories (configured
> just for arm targets) has over 350 'job-[timestamp]' directories,
> many of which are 2.5GB or more in size.
>

Every "job-[timestamp]" directory is the result of an "avocado run"
invocation, that is, one "make check-acceptance" command.

> I assume most of this is artefacts (disk images etc) needed to
> rerun the tests. That's useful to keep around so you can manually
> run a test. However, we should be sharing this between runs, not
> creating a fresh copy for every time check-acceptance is
> run, surely ?
>

They contain results and files needed for debugging the results of
tests, not artefacts needed to re-run them.  Everything that is
shareable is in the "~/avocado/data/caches" directory.

> I just freed 72 GB (!) of disk on my local box just by doing
> rm -rf build/arm-clang/tests/results/ ...
>
> thanks
> -- PMM
>

There's the "make check-clean" rule, which will clear everything too.
We can also add a flag to *not* save the results from the beginning,
but I guess one would miss them when needed.

Any other ideas?

Thanks,
- Cleber.




Re: "make check-acceptance" takes way too long

2021-08-02 Thread Cleber Rosa
On Sat, Jul 31, 2021 at 4:33 PM Peter Maydell  wrote:
>
> On Sat, 31 Jul 2021 at 19:43, Alex Bennée  wrote:
> >
> >
> > Peter Maydell  writes:
> >
> > > "make check-acceptance" takes way way too long. I just did a run
> > > on an arm-and-aarch64-targets-only debug build and it took over
> > > half an hour, and this despite it skipping or cancelling 26 out
> > > of 58 tests!
> > >
> > > I think that ~10 minutes runtime is reasonable. 30 is not;
> > > ideally no individual test would take more than a minute or so.
> > >
> > > Output saying where the time went. The first two tests take
> > > more than 10 minutes *each*. I think a good start would be to find
> > > a way of testing what they're testing that is less heavyweight.
> > >
> > >  (01/58) 
> > > tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv2:
> > > PASS (629.74 s)
> > >  (02/58) 
> > > tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv3:
> > > PASS (628.75 s)
> > >  (03/58) tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_kvm:
> > > CANCEL: kvm accelerator does not seem to be available (1.18 s)
> >
> > For these tests which purport to exercise the various GIC configurations
> > I think we would be much better served by running kvm-unit-tests which
> > at least try and exercise all the features rather than rely on the side
> > effect of booting an entire OS.
>
> I think "can we boot Linux via UEFI?" is worth testing, as is
> "can we boot Linux and do at least some stuff in userspace?"
> (there's a lot of TCG that doesn't get exercised by pure kernel boot).
> We just need to find a guest OS that isn't so overweight it takes 10
> minutes...
>
> -- PMM
>

I think using alternative guests is absolutely the way to go here.  I
had that in mind in the past, so much that I made sure to include
cirros[1] as one of the supported images[2] in avocado.utils.vmimage
(used in these tests above).  These tests are based on the LinuxTest
class[3], and they support the distro[4] and distro_version[5]
parameters.

But, cirros doesn't ship with a fully capable cloud-init package and I
deferred to support it in avocado.utils.cloudinit, and thus, support
cirros in those tests.  I gave that idea another try, and the results
are encouraging, with reduction of runtime by almost a factor of 6.
On my system I get:

$ avocado run -p distro=fedora -p distro_version=31
tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv3
 (1/1) tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv3:
PASS (165.48 s)

And with cirros:

$ avocado run -p distro=cirros -p distro_version=0.5.2
tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv3
(1/1) tests/acceptance/boot_linux.py:BootLinuxAarch64.test_virt_tcg_gicv3:
PASS (28.80 s)

I'll work on posting the bits needed to have this working out of the
box, but it'll require new code on the Avocado side too (tentative to
version 91.0).

Regards,
- Cleber.

[1] https://github.com/cirros-dev/cirros
[2] 
https://avocado-framework.readthedocs.io/en/90.0/guides/writer/libs/vmimage.html#supported-images
[3] 
https://qemu-project.gitlab.io/qemu/devel/testing.html#the-avocado-qemu-linuxtest-base-test-class
[4] https://qemu-project.gitlab.io/qemu/devel/testing.html#distro
[5] https://qemu-project.gitlab.io/qemu/devel/testing.html#distro-version




Re: "make check-acceptance" takes way too long

2021-07-31 Thread Cleber Rosa
On Sat, Jul 31, 2021 at 2:40 AM Thomas Huth  wrote:
>
> On 31/07/2021 00.04, Cleber Rosa wrote:
> > On Fri, Jul 30, 2021 at 11:43 AM Peter Maydell  
> > wrote:
> >>
> >> On Fri, 30 Jul 2021 at 16:12, Peter Maydell  
> >> wrote:
> >>>
> >>> "make check-acceptance" takes way way too long. I just did a run
> >>> on an arm-and-aarch64-targets-only debug build and it took over
> >>> half an hour, and this despite it skipping or cancelling 26 out
> >>> of 58 tests!
> >>>
> >>> I think that ~10 minutes runtime is reasonable. 30 is not;
> >>> ideally no individual test would take more than a minute or so.
> >>
> >> Side note, can check-acceptance run multiple tests in parallel?
> >
> > Yes, it can, but it's not currently enabled to do so, but I'm planning
> > to.  As a matter of fact, yesterday I was trying out Avocado's
> > parallel capable runner on a GitLab CI pipeline[1] and it went well.
>
> Was this one of the shared gitlab CI runners? ... well, those feature only a
> single CPU, so the run was likely not very different compared to a single run.
>

Yes, the two pipeline executions I referred to were run in the shared
GitLab CI runners.  I was testing two things:

1. Possible caveats/issues with the parallel Avocado runner (AKA
"nrunner") and the Acceptance tests (first pipeline linked, with "max
parallel tasks" set to 1)
2. Any possible gains/losses with running with "max parallel tasks"
set to 2 (second pipeline linked)

> > But the environment on GitLab CI is fluid, and I bet there's already
> > some level of overcommit of (at least) CPUs there.  The only pipeline
> > I ran there with tests running in parallel, resulted in some jobs with
> > improvements, and others with regressions in runtime.  Additionally,
> > lack of adequate resources can make more tests time out, and thus give
> > out false negatives.
>
> It certainly does not make sense to enable parallel tests for the shared
> runners there.
>
>   Thomas
>
>

There could be gains on scenario #2 if there's considerable I/O wait
on some tests.  That's why I mention that previous experiences mixing
the acceptance tests with the iotests were very interesting.  But
you're right, with only acceptance tests, mostly CPU bound, there was
no clear gain.

Best,
- Cleber.




Re: "make check-acceptance" takes way too long

2021-07-30 Thread Cleber Rosa
On Fri, Jul 30, 2021 at 11:43 AM Peter Maydell  wrote:
>
> On Fri, 30 Jul 2021 at 16:12, Peter Maydell  wrote:
> >
> > "make check-acceptance" takes way way too long. I just did a run
> > on an arm-and-aarch64-targets-only debug build and it took over
> > half an hour, and this despite it skipping or cancelling 26 out
> > of 58 tests!
> >
> > I think that ~10 minutes runtime is reasonable. 30 is not;
> > ideally no individual test would take more than a minute or so.
>
> Side note, can check-acceptance run multiple tests in parallel?

Yes, it can, but it's not currently enabled to do so, but I'm planning
to.  As a matter of fact, yesterday I was trying out Avocado's
parallel capable runner on a GitLab CI pipeline[1] and it went well.

> Running 3 or 4 at once would also improve the runtime...
>

About the time savings, on my own machine I see good results.  On a
build with only the x86_64 target, the parallel execution gets me:

$ avocado run -t arch:x86_64 --filter-by-tags-include-empty
--filter-by-tags-include-empty-key --test-runner=nrunner
--nrunner-max-parallel-tasks=4 tests/acceptance/
...
RESULTS: PASS 37 | ERROR 0 | FAIL 0 | SKIP 6 | WARN 5 | INTERRUPT
0 | CANCEL 0
...
JOB TIME   : 244.59 s

While the serial execution gets me:

$ avocado run -t arch:x86_64 --filter-by-tags-include-empty
--filter-by-tags-include-empty-key tests/acceptance/
...
RESULTS: PASS 37 | ERROR 0 | FAIL 0 | SKIP 6 | WARN 5 | INTERRUPT
0 | CANCEL 0
...
JOB TIME   : 658.65 s

But the environment on GitLab CI is fluid, and I bet there's already
some level of overcommit of (at least) CPUs there.  The only pipeline
I ran there with tests running in parallel, resulted in some jobs with
improvements, and others with regressions in runtime.  Additionally,
lack of adequate resources can make more tests time out, and thus give
out false negatives.

Anyway, my current plan is to allow users to configure the
parallelization level on their machines, while slowly and steadily
experimenting what can safely improve the runtime on GitLab CI.

BTW, another **very** sweet spot, which I have experimented with
before, is letting Avocado run the acceptance tests and the iotests in
parallel because they compete for pretty much different resources.
But, that's a matter for another round.

> -- PMM
>

Best regards,
- Cleber.

[1] https://gitlab.com/cleber.gnu/qemu/-/pipelines/344471529
[2] https://gitlab.com/cleber.gnu/qemu/-/pipelines/345082239




Re: Regression caught by replay_kernel.py:ReplayKernelNormal.test_aarch64_virt

2021-07-27 Thread Cleber Rosa
On Tue, Jul 27, 2021 at 9:48 AM Peter Maydell  wrote:
>
> On Tue, 27 Jul 2021 at 14:24, Cleber Rosa  wrote:
> > Yes, I've spent quite some time with some flaky behavior while running
> > the replay tests as well. But in the end, the test remained unchanged
> > because we found the issues in the actual code under test (one time
> > the recording of the replay file would sometimes be corrupted when
> > using >=1 CPUs, but 100% of the time when using a single CPU).
> >
> > This time, it was failing 100% of the time in my experience, and now,
> > after the fix in df3a2de51a07089a4a729fe1f792f658df9dade4, it's
> > passing 100% of the time.  So I guess even tests with some observed
> > flakiness can have their value.
>
> To me they have very little value, because once I notice a test
> is flaky I simply start to ignore whether it is passing or failing,
> and then it might as well not be there at all.
> (This is happening currently with the gitlab CI tests, which have
> been failing for a week.)
>
> -- PMM
>

I hear you... and I acknowledge that we currently don't have a good
solution for keeping track of the test results data and thus going
beyond one's perceived value of a test.

It's not something for the short term, but I do plan to work on a
"confidence" tracker for tests.  There is some seed work in the CKI
data warehouse project[1] but it's very incipient.

- Cleber.

[1] - 
https://gitlab.com/cki-project/datawarehouse/-/blob/main/datawarehouse/views.py#L158




Re: Regression caught by replay_kernel.py:ReplayKernelNormal.test_aarch64_virt

2021-07-27 Thread Cleber Rosa
On Tue, Jul 27, 2021 at 5:17 AM Peter Maydell  wrote:
>
> On Tue, 27 Jul 2021 at 01:39, Cleber Rosa  wrote:
> > tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_aarch64_virt
> > is currently failing consistently (first found that in [1]).
>
> FWIW I find that on my local machine this test is consistently flaky
> and always has been, so I just ignore any failure I see in it when
> running 'make check-acceptance' locally.
>
> -- PMM
>

Hi Peter,

Yes, I've spent quite some time with some flaky behavior while running
the replay tests as well. But in the end, the test remained unchanged
because we found the issues in the actual code under test (one time
the recording of the replay file would sometimes be corrupted when
using >=1 CPUs, but 100% of the time when using a single CPU).

This time, it was failing 100% of the time in my experience, and now,
after the fix in df3a2de51a07089a4a729fe1f792f658df9dade4, it's
passing 100% of the time.  So I guess even tests with some observed
flakiness can have their value.

Cheers,
- Cleber.




Re: Regression caught by replay_kernel.py:ReplayKernelNormal.test_aarch64_virt

2021-07-27 Thread Cleber Rosa
On Tue, Jul 27, 2021 at 3:37 AM Peter Maydell  wrote:
>
> On Tue, 27 Jul 2021 at 01:39, Cleber Rosa  wrote:
> >
> >
> > Hi everyone,
> >
> > tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_aarch64_virt
> > is currently failing consistently (first found that in [1]).
> >
> > I've bisected it down to the following commit:
> >
> > ---
> >
> > 78ff82bb1b67c0d79113688e4b3427fc99cab9d4 is the first bad commit
> > commit 78ff82bb1b67c0d79113688e4b3427fc99cab9d4
> > Author: Richard Henderson 
> >
> > accel/tcg: Reduce CF_COUNT_MASK to match TCG_MAX_INSNS
> >
> > The space reserved for CF_COUNT_MASK was overly large.
> > Reduce to free up cflags bits and eliminate an extra test.
> >
> > Tested-by: Mark Cave-Ayland 
> > Signed-off-by: Richard Henderson 
> > Reviewed-by: Alex Bennée 
> > Reviewed-by: Peter Maydell 
> > Message-Id: <20210717221851.2124573-2-richard.hender...@linaro.org>
> >
> >  accel/tcg/translate-all.c | 5 ++---
> >  include/exec/exec-all.h   | 4 +++-
> >  2 files changed, 5 insertions(+), 4 deletions(-)
>
> This is probably fixed by
> https://patchew.org/QEMU/20210725174405.24568-1-peter.mayd...@linaro.org/
> (which is in RTH's pullreq currently on list).
>
> -- PMM
>

Actually, it is already fixed by df3a2de51a07089a4a729fe1f792f658df9dade4.

BTW, TCG looks like the right place where the bug was, because it
affected other targets and machines.  This is the actual list of tests
I was seeing the same issue (and are now fixed):

(1/4) tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_aarch64_virt:
PASS (8.86 s)
(2/4) tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_arm_virt:
PASS (13.42 s)
(3/4) tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_m68k_mcf5208evb:
PASS (3.20 s)
(4/4) tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_xtensa_lx60:
PASS (12.29 s)

Cheers,
- Cleber.




Regression caught by replay_kernel.py:ReplayKernelNormal.test_aarch64_virt

2021-07-26 Thread Cleber Rosa


Hi everyone,

tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_aarch64_virt
is currently failing consistently (first found that in [1]).

I've bisected it down to the following commit:

---

78ff82bb1b67c0d79113688e4b3427fc99cab9d4 is the first bad commit
commit 78ff82bb1b67c0d79113688e4b3427fc99cab9d4
Author: Richard Henderson 

accel/tcg: Reduce CF_COUNT_MASK to match TCG_MAX_INSNS

The space reserved for CF_COUNT_MASK was overly large.
Reduce to free up cflags bits and eliminate an extra test.

Tested-by: Mark Cave-Ayland 
Signed-off-by: Richard Henderson 
Reviewed-by: Alex Bennée 
Reviewed-by: Peter Maydell 
Message-Id: <20210717221851.2124573-2-richard.hender...@linaro.org>

 accel/tcg/translate-all.c | 5 ++---
 include/exec/exec-all.h   | 4 +++-
 2 files changed, 5 insertions(+), 4 deletions(-)

---

To reproduce it:

1. configure --target-list=aarch64-softmmu
2. meson compile
3. make check-venv
4. ./tests/venv/bin/avocado --show=test run 
tests/acceptance/replay_kernel.py:ReplayKernelNormal.test_aarch64_virt

PS: I haven't had the time yet to scan the mailing list for possible
discussions about it.

[1] https://gitlab.com/qemu-project/qemu/-/jobs/1445513133#L268

-- 
Cleber Rosa
[ Sr Software Engineer - Virtualization Team - Red Hat ]
[ Avocado Test Framework - avocado-framework.github.io ]
[  7ABB 96EB 8B46 B94D 5E0F  E9BB 657E 8D33 A5F2 09F3  ]




Re: [PATCH for-6.1 v2] machine: Disallow specifying topology parameters as zero

2021-07-23 Thread Cleber Rosa


wangyanan (Y) writes:

> Hi Cleber,
>
> On 2021/7/23 6:25, Cleber Rosa wrote:
>> Yanan Wang writes:
>>
>>> In the SMP configuration, we should either specify a topology
>>> parameter with a reasonable value (equal to or greater than 1)
>>> or just leave it omitted and QEMU will calculate its value.
>>> Configurations which explicitly specify the topology parameters
>>> as zero like "sockets=0" are meaningless, so disallow them.
>>>
>>> However, the commit 1e63fe685804d
>>> (machine: pass QAPI struct to mc->smp_parse) has documented that
>>> '0' has the same semantics as omitting a parameter in the qapi
>>> comment for SMPConfiguration. So this patch fixes the doc and
>>> also adds the corresponding sanity check in the smp parsers.
>>>
>>> Suggested-by: Andrew Jones 
>>> Signed-off-by: Yanan Wang 
>>> ---
>>>   hw/core/machine.c | 14 ++
>>>   qapi/machine.json |  6 +++---
>>>   qemu-options.hx   | 12 +++-
>>>   3 files changed, 24 insertions(+), 8 deletions(-)
>> Hi Yanan,
>>
>> This looks somewhat similar to this very old patch of mine:
>>
>> https://mail.gnu.org/archive/html/qemu-devel/2020-10/msg03039.html
>>
>> I'm putting a reference here because I believe the test can be salvaged
>> and slightly adapted for this patch of yours.
>>
>> Let me know if I can help anyhow.
>>
> Thanks for this.
> I was introducing an unit test for the smp parsing in [1], in which all
> possible valid and invalid smp configs were covered, and actually the
> "parameter=0" stuff was also covered. You can have a look, and
> suggestions are welcome. I'm not sure we need two different tests
> for the same part. :)
>

Right, I only saw the other series later.  Nice work there!

- Cleber.




Re: [PATCH for-6.1 v2] machine: Disallow specifying topology parameters as zero

2021-07-22 Thread Cleber Rosa


Yanan Wang writes:

> In the SMP configuration, we should either specify a topology
> parameter with a reasonable value (equal to or greater than 1)
> or just leave it omitted and QEMU will calculate its value.
> Configurations which explicitly specify the topology parameters
> as zero like "sockets=0" are meaningless, so disallow them.
>
> However, the commit 1e63fe685804d
> (machine: pass QAPI struct to mc->smp_parse) has documented that
> '0' has the same semantics as omitting a parameter in the qapi
> comment for SMPConfiguration. So this patch fixes the doc and
> also adds the corresponding sanity check in the smp parsers.
>
> Suggested-by: Andrew Jones 
> Signed-off-by: Yanan Wang 
> ---
>  hw/core/machine.c | 14 ++
>  qapi/machine.json |  6 +++---
>  qemu-options.hx   | 12 +++-
>  3 files changed, 24 insertions(+), 8 deletions(-)

Hi Yanan,

This looks somewhat similar to this very old patch of mine:

   https://mail.gnu.org/archive/html/qemu-devel/2020-10/msg03039.html

I'm putting a reference here because I believe the test can be salvaged
and slightly adapted for this patch of yours.

Let me know if I can help anyhow.

Thanks,
- Cleber.




Re: [PATCH-for-6.1 v2] gitlab-ci: Extract OpenSBI job rules and fix 'when' condition

2021-07-22 Thread Cleber Rosa


Philippe Mathieu-Daudé writes:

> First, all jobs depending on 'docker-opensbi' job must use at most
> all the rules that triggers it. The simplest way to ensure that is
> to always use the same rules. Extract all the rules to a reusable
> section, and include this section (with the 'extends' keyword) in
> both 'docker-opensbi' and 'build-opensbi' jobs.
>
> Second, jobs depending on another should not use the 'when: always'
> condition, because if a dependency failed we should not keep running
> jobs depending on it. The correct condition is 'when: on_success'.
>
> The problems were introduced in commit c6fc0fc1a71 ("gitlab-ci.yml:
> Add jobs to build OpenSBI firmware binaries"), but were revealed in
> commit 91e9c47e50a ("docker: OpenSBI build job depends on OpenSBI
> container").
>
> This fix is similar to the one used with the EDK2 firmware job in
> commit ac0595cf6b3 ("gitlab-ci: Extract EDK2 job rules to reusable
> section").
>
> Reported-by: Daniel P. Berrangé 
> Reviewed-by: Daniel P. Berrangé 
> Reviewed-by: Willian Rampazzo 
> Signed-off-by: Philippe Mathieu-Daudé 
> ---
> v2: when 'always' -> 'on_success' & reworded (danpb)
>
> Supersedes: <20210720164829.3949558-1-phi...@redhat.com>
> ---
>  .gitlab-ci.d/opensbi.yml | 30 ++
>  1 file changed, 18 insertions(+), 12 deletions(-)
>

Reviewed-by: Cleber Rosa 




Re: [PATCH for-6.1 2/2] docs: Move licence/copyright from HTML output to rST comments

2021-07-22 Thread Cleber Rosa


Peter Maydell writes:

> Our built HTML documentation now has a standard footer which
> gives the license for QEMU (and its documentation as a whole).
> In almost all pages, we either don't bother to state the
> copyright/license for the individual rST sources, or we put
> it in an rST comment. There are just three pages which render
> copyright or license information into the user-visible HTML.
>
> Quoting a specific (different) license for an individual HTML
> page within the manual is confusing. Downgrade the license
> and copyright info to a comment within the rST source, bringing
> these pages in line with the rest of our documents.
>
> Suggested-by: Markus Armbruster 
> Signed-off-by: Peter Maydell 
> ---
>  docs/interop/vhost-user-gpu.rst |  7 ---
>  docs/interop/vhost-user.rst | 12 +++-
>  docs/system/generic-loader.rst  |  4 ++--
>  3 files changed, 13 insertions(+), 10 deletions(-)

Reviewed-by: Cleber Rosa 




Re: [PATCH for-6.1 1/2] docs: Remove stale TODO comments about license and version

2021-07-22 Thread Cleber Rosa


Peter Maydell writes:

> Since commits 13f934e79fa and 3a50c8f3067aaf, our HTML docs include a
> footer to all pages stating the license and version.  We can
> therefore delete the TODO comments suggesting we should do that from
> our .rst files.
>
> Signed-off-by: Peter Maydell 
> ---
>  docs/interop/qemu-ga-ref.rst | 9 -
>  docs/interop/qemu-qmp-ref.rst| 9 -
>  docs/interop/qemu-storage-daemon-qmp-ref.rst | 9 -
>  3 files changed, 27 deletions(-)
>

Reviewed-by: Cleber Rosa 




[PULL 4/7] tests/acceptance/virtio-gpu.py: combine kernel command line

2021-07-20 Thread Cleber Rosa
Both tests use the same kernel command line arguments, so there's no
need to have a common and then an additional set of arguments.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-5-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 20a59fabf3..fbde278705 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -34,7 +34,7 @@ class VirtioGPUx86(Test):
 :avocado: tags=cpu:host
 """
 
-KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
+KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
 KERNEL_URL = (
 "https://archives.fedoraproject.org/pub/fedora;
 "/linux/releases/33/Everything/x86_64/os/images"
@@ -58,9 +58,6 @@ def test_virtio_vga_virgl(self):
 """
 :avocado: tags=device:virtio-vga
 """
-kernel_command_line = (
-self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
-)
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
 
@@ -78,7 +75,7 @@ def test_virtio_vga_virgl(self):
 "-initrd",
 initrd_path,
 "-append",
-kernel_command_line,
+self.KERNEL_COMMAND_LINE,
 )
 try:
 self.vm.launch()
@@ -96,9 +93,6 @@ def test_vhost_user_vga_virgl(self):
 """
 :avocado: tags=device:vhost-user-vga
 """
-kernel_command_line = (
-self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
-)
 # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
 self.require_accelerator('kvm')
 
@@ -145,7 +139,7 @@ def test_vhost_user_vga_virgl(self):
 "-initrd",
 initrd_path,
 "-append",
-kernel_command_line,
+self.KERNEL_COMMAND_LINE,
 )
 self.vm.launch()
 self.wait_for_console_pattern("as init process")
-- 
2.31.1




[PULL 3/7] tests/acceptance/virtio-gpu.py: combine CPU tags

2021-07-20 Thread Cleber Rosa
Like previously done with the arch tags, all tests use the same CPU
value so it's possible to combine them at the class level.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-4-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 729b99b2e5..20a59fabf3 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -31,6 +31,7 @@ class VirtioGPUx86(Test):
 """
 :avocado: tags=virtio-gpu
 :avocado: tags=arch:x86_64
+:avocado: tags=cpu:host
 """
 
 KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
@@ -56,7 +57,6 @@ def wait_for_console_pattern(self, success_message, vm=None):
 def test_virtio_vga_virgl(self):
 """
 :avocado: tags=device:virtio-vga
-:avocado: tags=cpu:host
 """
 kernel_command_line = (
 self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
@@ -95,7 +95,6 @@ def test_virtio_vga_virgl(self):
 def test_vhost_user_vga_virgl(self):
 """
 :avocado: tags=device:vhost-user-vga
-:avocado: tags=cpu:host
 """
 kernel_command_line = (
 self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
-- 
2.31.1




[PULL 1/7] tests/acceptance/virtio-gpu.py: use require_accelerator()

2021-07-20 Thread Cleber Rosa
Since efe30d501 there's a shorthand for requiring specific
accelerators, and canceling the test if it's not available.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-2-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 589332c1b7..42602a240a 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -17,10 +17,6 @@
 import subprocess
 
 
-ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
-KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
-
-
 def pick_default_vug_bin():
 relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
 if is_readable_executable_file(relative_path):
@@ -66,8 +62,7 @@ def test_virtio_vga_virgl(self):
 self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
 )
 # FIXME: should check presence of virtio, virgl etc
-if not kvm_available(self.arch, self.qemu_bin):
-self.cancel(KVM_NOT_AVAILABLE)
+self.require_accelerator('kvm')
 
 kernel_path = self.fetch_asset(self.KERNEL_URL)
 initrd_path = self.fetch_asset(self.INITRD_URL)
@@ -107,8 +102,7 @@ def test_vhost_user_vga_virgl(self):
 self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
 )
 # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
-if not kvm_available(self.arch, self.qemu_bin):
-self.cancel(KVM_NOT_AVAILABLE)
+self.require_accelerator('kvm')
 
 vug = pick_default_vug_bin()
 if not vug:
-- 
2.31.1




[PULL 2/7] tests/acceptance/virtio-gpu.py: combine x86_64 arch tags

2021-07-20 Thread Cleber Rosa
The test class in question is x86_64 specific, so it's possible to set
the tags at the class level.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-3-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 42602a240a..729b99b2e5 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -30,6 +30,7 @@ def pick_default_vug_bin():
 class VirtioGPUx86(Test):
 """
 :avocado: tags=virtio-gpu
+:avocado: tags=arch:x86_64
 """
 
 KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
@@ -54,7 +55,6 @@ def wait_for_console_pattern(self, success_message, vm=None):
 
 def test_virtio_vga_virgl(self):
 """
-:avocado: tags=arch:x86_64
 :avocado: tags=device:virtio-vga
 :avocado: tags=cpu:host
 """
@@ -94,7 +94,6 @@ def test_virtio_vga_virgl(self):
 
 def test_vhost_user_vga_virgl(self):
 """
-:avocado: tags=arch:x86_64
 :avocado: tags=device:vhost-user-vga
 :avocado: tags=cpu:host
 """
-- 
2.31.1




[PULL 5/7] tests/acceptance/virtio-gpu.py: use virtio-vga-gl

2021-07-20 Thread Cleber Rosa
Since 49afbca3b, the use of an optional virgl renderer is not
available anymore, and since b36eb8860f, the way to choose a GL based
renderer is to use the "virtio-vga-gl" device.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-6-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Reviewed-by: Marc-André Lureau 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index fbde278705..0f84affe82 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -56,7 +56,7 @@ def wait_for_console_pattern(self, success_message, vm=None):
 
 def test_virtio_vga_virgl(self):
 """
-:avocado: tags=device:virtio-vga
+:avocado: tags=device:virtio-vga-gl
 """
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
@@ -67,7 +67,7 @@ def test_virtio_vga_virgl(self):
 self.vm.set_console()
 self.vm.add_args("-m", "2G")
 self.vm.add_args("-machine", "pc,accel=kvm")
-self.vm.add_args("-device", "virtio-vga,virgl=on")
+self.vm.add_args("-device", "virtio-vga-gl")
 self.vm.add_args("-display", "egl-headless")
 self.vm.add_args(
 "-kernel",
-- 
2.31.1




[PULL 7/7] remote/memory: Replace share parameter with ram_flags

2021-07-20 Thread Cleber Rosa
From: Yang Zhong 

Fixes: d5015b801340 ("softmmu/memory: Pass ram_flags to
qemu_ram_alloc_from_fd()")

Signed-off-by: Yang Zhong 
Reviewed-by: David Hildenbrand 
Reviewed-by: Philippe Mathieu-Daudé 
Reviewed-by: Pankaj Gupta 
Reviewed-by: Peter Xu 
Message-Id: <20210709052800.63588-1-yang.zh...@intel.com>
Signed-off-by: Cleber Rosa 
---
 hw/remote/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/remote/memory.c b/hw/remote/memory.c
index 472ed2a272..6e21ab1a45 100644
--- a/hw/remote/memory.c
+++ b/hw/remote/memory.c
@@ -46,7 +46,7 @@ void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp)
 subregion = g_new(MemoryRegion, 1);
 memory_region_init_ram_from_fd(subregion, NULL,
name, sysmem_info->sizes[region],
-   true, msg->fds[region],
+   RAM_SHARED, msg->fds[region],
sysmem_info->offsets[region],
errp);
 
-- 
2.31.1




[PULL 6/7] tests/acceptance/virtio-gpu.py: provide kernel and initrd hashes

2021-07-20 Thread Cleber Rosa
By providing kernel and initrd hashes, the test guarantees the
integrity of the images used and avoids the warnings set by
fetch_asset() when hashes are lacking.

Signed-off-by: Cleber Rosa 
Message-Id: <20210714174051.28164-7-cr...@redhat.com>
Reviewed-by: Willian Rampazzo 
Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 0f84affe82..4acc1e6d5f 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -40,11 +40,13 @@ class VirtioGPUx86(Test):
 "/linux/releases/33/Everything/x86_64/os/images"
 "/pxeboot/vmlinuz"
 )
+KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf'
 INITRD_URL = (
 "https://archives.fedoraproject.org/pub/fedora;
 "/linux/releases/33/Everything/x86_64/os/images"
 "/pxeboot/initrd.img"
 )
+INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9'
 
 def wait_for_console_pattern(self, success_message, vm=None):
 wait_for_console_pattern(
@@ -61,8 +63,8 @@ def test_virtio_vga_virgl(self):
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
 
-kernel_path = self.fetch_asset(self.KERNEL_URL)
-initrd_path = self.fetch_asset(self.INITRD_URL)
+kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
 
 self.vm.set_console()
 self.vm.add_args("-m", "2G")
@@ -100,8 +102,8 @@ def test_vhost_user_vga_virgl(self):
 if not vug:
 self.cancel("Could not find vhost-user-gpu")
 
-kernel_path = self.fetch_asset(self.KERNEL_URL)
-initrd_path = self.fetch_asset(self.INITRD_URL)
+kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
 
 # Create socketpair to connect proxy and remote processes
 qemu_sock, vug_sock = socket.socketpair(
-- 
2.31.1




[PULL for 6.1 0/7] Python and Acceptance Tests

2021-07-20 Thread Cleber Rosa
The following changes since commit c04b4d9e6b596ead3cf6046a9243fbfee068ef33:

  Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging 
(2021-07-20 16:59:33 +0100)

are available in the Git repository at:

  https://gitlab.com/cleber.gnu/qemu.git/ tags/python-next-pull-request

for you to fetch changes up to f4a3fda43e389fa26d41ec9cd24f42c5fe20ba9d:

  remote/memory: Replace share parameter with ram_flags (2021-07-20 15:34:20 
-0400)


Acceptance Tests

- Fix for tests/acceptance/virtio-gpu.py to match the change in device
  name
- Fix for failure caught by tests/acceptance/multiprocess.py

PS: While not a maintainer for the subsystem in PATCH 7, I'm including
it as a one-off to facilitate the landing of the fix as discussed in
the mailing list.



Cleber Rosa (6):
  tests/acceptance/virtio-gpu.py: use require_accelerator()
  tests/acceptance/virtio-gpu.py: combine x86_64 arch tags
  tests/acceptance/virtio-gpu.py: combine CPU tags
  tests/acceptance/virtio-gpu.py: combine kernel command line
  tests/acceptance/virtio-gpu.py: use virtio-vga-gl
  tests/acceptance/virtio-gpu.py: provide kernel and initrd hashes

Yang Zhong (1):
  remote/memory: Replace share parameter with ram_flags

 hw/remote/memory.c |  2 +-
 tests/acceptance/virtio-gpu.py | 42 --
 2 files changed, 16 insertions(+), 28 deletions(-)

-- 
2.31.1





Re: tests/acceptance/multiprocess.py test failure

2021-07-20 Thread Cleber Rosa


Jag Raman writes:

>
> Hi Cleber,
>
> We presently don’t have permissions to send a PR to
> upstream (Peter Maydell).
>
> Presently, we are requesting someone else who has
> permissions to do PRs on our behalf. We will work
> on getting permissions to send PRs going forward.
>
> Thank you!

Hi Jag,

I'm going to include that patch in an upcoming PR.  Please let me know
if this is not what you intended.

PS: I'm not sure I follow what your specific permission problem is, if
it's technical or something else.  But, in either case, I'd recommend you
sync the MAINTAINERS file entries with your roles/abilities to maintain
those files listed.

Best Regards,
- Cleber.




Re: tests/acceptance/multiprocess.py test failure

2021-07-15 Thread Cleber Rosa


David Hildenbrand writes:

>
> Hi,
>
> maybe
>
> https://lkml.kernel.org/r/20210709052800.63588-1-yang.zh...@intel.com
>
> resolves your issue. If not, pleas let me know and I'll try
> reproducing (will have to install avocado).

Hi David,

Yes, that fixes it.  Sorry for missing that patch on the ml.

Maintainers (Elena, Jagannathan, John),

Are you planning a PR with this patch?

Thanks,

-- 
Cleber Rosa
[ Sr Software Engineer - Virtualization Team - Red Hat ]
[ Avocado Test Framework - avocado-framework.github.io ]
[  7ABB 96EB 8B46 B94D 5E0F  E9BB 657E 8D33 A5F2 09F3  ]




tests/acceptance/multiprocess.py test failure

2021-07-14 Thread Cleber Rosa
Hi everyone,

The tests/acceptance/multiprocess.py:Multiprocess.test_multiprocess_x86_64
is currently failing (as of a9649a719a44894b81f38dc1c5c1888ee684acef).
Unfortunately CI was unable to catch this issue earlier, because tests
that require KVM are not yet running (but this should change soon).
The relevant part of the test logs is:

VM launch command: './qemu-system-x86_64 -display none -vga none
-chardev 
socket,id=mon,path=/var/tmp/avo_qemu_sock_5g22rvrp/qemu-427815-monitor.sock
-mon chardev=mon,mode=control -chardev
socket,id=console,path=/var/tmp/avo_qemu_sock_5g22rvrp/qemu-427815-console.sock,server=on,wait=off
-serial chardev:console -machine pc -accel kvm -cpu host -object
memory-backend-memfd,id=sysmem-file,size=2G --numa
node,memdev=sysmem-file -m 2048 -kernel
/home/cleber/avocado/data/cache/by_location/b4c64f15a75b083966d39d9246dd8db177736bb4/vmlinuz
-initrd 
/home/cleber/avocado/data/cache/by_location/b4c64f15a75b083966d39d9246dd8db177736bb4/initrd.img
-append printk.time=0 console=ttyS0 rdinit=/bin/bash -device
x-pci-proxy-dev,id=lsi1,fd=16'
>>> {'execute': 'qmp_capabilities'}

The test remains stuck here for as long as the test is allowed to run.
Because there's currently no timeout in the test, it can remain stuck
forever.  But, with a timeout, we end up getting:

Error launching VM
Command: './qemu-system-x86_64 -display none -vga none -chardev
socket,id=mon,path=/var/tmp/avo_qemu_sock_5g22rvrp/qemu-427815-monitor.sock
-mon chardev=mon,mode=control -chardev
socket,id=console,path=/var/tmp/avo_qemu_sock_5g22rvrp/qemu-427815-console.sock,server=on,wait=off
-serial chardev:console -machine pc -accel kvm -cpu host -object
memory-backend-memfd,id=sysmem-file,size=2G --numa
node,memdev=sysmem-file -m 2048 -kernel
/home/cleber/avocado/data/cache/by_location/b4c64f15a75b083966d39d9246dd8db177736bb4/vmlinuz
-initrd 
/home/cleber/avocado/data/cache/by_location/b4c64f15a75b083966d39d9246dd8db177736bb4/initrd.img
-append printk.time=0 console=ttyS0 rdinit=/bin/bash -device
x-pci-proxy-dev,id=lsi1,fd=16'
Output: "qemu-system-x86_64: ../../src/qemu/softmmu/physmem.c:2055:
qemu_ram_alloc_from_fd: Assertion `(ram_flags & ~(RAM_SHARED |
RAM_PMEM | RAM_NORESERVE)) == 0' failed.\n"

I've bisected it to:

---

d5015b80134047013eeec1df5ce2014ee114 is the first bad commit
commit d5015b80134047013eeec1df5ce2014ee114
Author: David Hildenbrand 
Date:   Mon May 10 13:43:17 2021 +0200

softmmu/memory: Pass ram_flags to qemu_ram_alloc_from_fd()

Let's pass in ram flags just like we do with qemu_ram_alloc_from_file(),
to clean up and prepare for more flags.

Simplify the documentation of passed ram flags: Looking at our
documentation of RAM_SHARED and RAM_PMEM is sufficient, no need to be
repetitive.

Reviewed-by: Philippe Mathieu-Daudé 
Reviewed-by: Peter Xu 
Acked-by: Eduardo Habkost  for memory backend
and machine core
Signed-off-by: David Hildenbrand 
Message-Id: <20210510114328.21835-5-da...@redhat.com>
Signed-off-by: Paolo Bonzini 

 backends/hostmem-memfd.c | 7 ---
 hw/misc/ivshmem.c| 5 ++---
 include/exec/memory.h| 9 +++--
 include/exec/ram_addr.h  | 6 +-
 softmmu/memory.c | 7 +++
 5 files changed, 13 insertions(+), 21 deletions(-)

---

To reproduce it:

1. configure --target-list=x86_64-softmmu
2. meson compile
3. make check-venv
4. ./tests/venv/bin/avocado --show=test run --job-timeout=20s
tests/acceptance/multiprocess.py:Multiprocess.test_multiprocess_x86_64

It'd be helpful to know if anyone else is experiencing the same failure.

Thanks,
- Cleber.




Re: [PATCH-for-6.1] gitlab-ci: Extract EDK2 job rules to reusable section

2021-07-14 Thread Cleber Rosa


Philippe Mathieu-Daudé writes:

> All jobs depending on 'docker-edk2' job must use at most all
> the rules that triggers it. The simplest way to ensure that
> is to always use the same rules. Extract all the rules to a
> reusable section, and include this section (with the 'extends'
> keyword) in both 'docker-edk2' and 'build-edk2' jobs.
>
> The problem was introduced in commit 71920809cea ("gitlab-ci.yml:
> Add jobs to build EDK2 firmware binaries"), but was revealed in
> commit 1925468ddbf ("docker: EDK2 build job depends on EDK2
> container") and eventually failed on CI:
> https://gitlab.com/qemu-project/qemu/-/pipelines/335995843
>
> Reported-by: Daniel P. Berrangé 
> Signed-off-by: Philippe Mathieu-Daudé 
> ---
> If this is a correct fix, I'll send a similar fix for the
> OpenSBI jobs.
> ---
>  .gitlab-ci.d/edk2.yml | 27 ---
>  1 file changed, 16 insertions(+), 11 deletions(-)
>
> diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml
> index ba7280605c4..aae2f7ad880 100644
> --- a/.gitlab-ci.d/edk2.yml
> +++ b/.gitlab-ci.d/edk2.yml
> @@ -1,10 +1,22 @@
> -docker-edk2:
> - stage: containers
> - rules: # Only run this job when the Dockerfile is modified
> +# All jobs needing docker-edk2 must use the same rules it uses.
> +.edk2_job_rules:
> + rules: # Only run this job when ...
>   - changes:
> +   # this file is modified
> - .gitlab-ci.d/edk2.yml
> +   # or the Dockerfile is modified
> - .gitlab-ci.d/edk2/Dockerfile
> +   # or roms/edk2/ is modified (submodule updated)
> +   - roms/edk2/*
> when: always
> + - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 
> 'edk2'
> +   when: always
> + - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 
> 'EDK2'
> +   when: always
> +
> +docker-edk2:
> + extends: .edk2_job_rules
> + stage: containers
>   image: docker:19.03.1
>   services:
>   - docker:19.03.1-dind
> @@ -24,16 +36,9 @@ docker-edk2:
>   - docker push $IMAGE_TAG
>  
>  build-edk2:
> + extends: .edk2_job_rules
>   stage: build
>   needs: ['docker-edk2']
> - rules: # Only run this job when ...
> - - changes: # ... roms/edk2/ is modified (submodule updated)
> -   - roms/edk2/*
> -   when: always
> - - if: '$CI_COMMIT_REF_NAME =~ /^edk2/' # or the branch/tag starts with 
> 'edk2'
> -   when: always
> - - if: '$CI_COMMIT_MESSAGE =~ /edk2/i' # or last commit description contains 
> 'EDK2'
> -   when: always
>   artifacts:
> paths: # 'artifacts.zip' will contains the following files:
> - pc-bios/edk2*bz2


Hi Phil,

This looks good, but have you triggered a pipeline with these changes?
It'd be helpful for an extra level of assurance for the intended results.

Thanks!

-- 
Cleber Rosa
[ Sr Software Engineer - Virtualization Team - Red Hat ]
[ Avocado Test Framework - avocado-framework.github.io ]
[  7ABB 96EB 8B46 B94D 5E0F  E9BB 657E 8D33 A5F2 09F3  ]




[PATCH 4/6] tests/acceptance/virtio-gpu.py: combine kernel command line

2021-07-14 Thread Cleber Rosa
Both tests use the same kernel command line arguments, so there's no
need to have a common and then an additional set of arguments.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 20a59fabf3..fbde278705 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -34,7 +34,7 @@ class VirtioGPUx86(Test):
 :avocado: tags=cpu:host
 """
 
-KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
+KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
 KERNEL_URL = (
 "https://archives.fedoraproject.org/pub/fedora;
 "/linux/releases/33/Everything/x86_64/os/images"
@@ -58,9 +58,6 @@ def test_virtio_vga_virgl(self):
 """
 :avocado: tags=device:virtio-vga
 """
-kernel_command_line = (
-self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
-)
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
 
@@ -78,7 +75,7 @@ def test_virtio_vga_virgl(self):
 "-initrd",
 initrd_path,
 "-append",
-kernel_command_line,
+self.KERNEL_COMMAND_LINE,
 )
 try:
 self.vm.launch()
@@ -96,9 +93,6 @@ def test_vhost_user_vga_virgl(self):
 """
 :avocado: tags=device:vhost-user-vga
 """
-kernel_command_line = (
-self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
-)
 # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
 self.require_accelerator('kvm')
 
@@ -145,7 +139,7 @@ def test_vhost_user_vga_virgl(self):
 "-initrd",
 initrd_path,
 "-append",
-kernel_command_line,
+self.KERNEL_COMMAND_LINE,
 )
 self.vm.launch()
 self.wait_for_console_pattern("as init process")
-- 
2.31.1




[PATCH 6/6] tests/acceptance/virtio-gpu.py: provide kernel and initrd hashes

2021-07-14 Thread Cleber Rosa
By providing kernel and initrd hashes, the test guarantees the
integrity of the images used and avoids the warnings set by
fetch_asset() when hashes are lacking.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index 0f84affe82..4acc1e6d5f 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -40,11 +40,13 @@ class VirtioGPUx86(Test):
 "/linux/releases/33/Everything/x86_64/os/images"
 "/pxeboot/vmlinuz"
 )
+KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf'
 INITRD_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
 "/linux/releases/33/Everything/x86_64/os/images"
 "/pxeboot/initrd.img"
 )
+INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9'
 
 def wait_for_console_pattern(self, success_message, vm=None):
 wait_for_console_pattern(
@@ -61,8 +63,8 @@ def test_virtio_vga_virgl(self):
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
 
-kernel_path = self.fetch_asset(self.KERNEL_URL)
-initrd_path = self.fetch_asset(self.INITRD_URL)
+kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
 
 self.vm.set_console()
 self.vm.add_args("-m", "2G")
@@ -100,8 +102,8 @@ def test_vhost_user_vga_virgl(self):
 if not vug:
 self.cancel("Could not find vhost-user-gpu")
 
-kernel_path = self.fetch_asset(self.KERNEL_URL)
-initrd_path = self.fetch_asset(self.INITRD_URL)
+kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
+initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)
 
 # Create socketpair to connect proxy and remote processes
 qemu_sock, vug_sock = socket.socketpair(
-- 
2.31.1




[PATCH 5/6] tests/acceptance/virtio-gpu.py: use virtio-vga-gl

2021-07-14 Thread Cleber Rosa
Since 49afbca3b, the use of an optional virgl renderer is not
available anymore, and since b36eb8860f, the way to choose a GL based
renderer is to use the "virtio-vga-gl" device.

Signed-off-by: Cleber Rosa 
---
 tests/acceptance/virtio-gpu.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/acceptance/virtio-gpu.py b/tests/acceptance/virtio-gpu.py
index fbde278705..0f84affe82 100644
--- a/tests/acceptance/virtio-gpu.py
+++ b/tests/acceptance/virtio-gpu.py
@@ -56,7 +56,7 @@ def wait_for_console_pattern(self, success_message, vm=None):
 
 def test_virtio_vga_virgl(self):
 """
-:avocado: tags=device:virtio-vga
+:avocado: tags=device:virtio-vga-gl
 """
 # FIXME: should check presence of virtio, virgl etc
 self.require_accelerator('kvm')
@@ -67,7 +67,7 @@ def test_virtio_vga_virgl(self):
 self.vm.set_console()
 self.vm.add_args("-m", "2G")
 self.vm.add_args("-machine", "pc,accel=kvm")
-self.vm.add_args("-device", "virtio-vga,virgl=on")
+self.vm.add_args("-device", "virtio-vga-gl")
 self.vm.add_args("-display", "egl-headless")
 self.vm.add_args(
 "-kernel",
-- 
2.31.1




  1   2   3   4   5   6   7   8   9   10   >