Re: [PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-08-05 Thread Nir Soffer
On Wed, Aug 5, 2020 at 10:38 AM Vladimir Sementsov-Ogievskiy
 wrote:
>
> 28.07.2020 19:05, Nir Soffer wrote:
> > On Tue, Jul 28, 2020 at 4:43 PM Vladimir Sementsov-Ogievskiy
> >  wrote:
> >>
> >> 28.07.2020 00:58, Nir Soffer wrote:
> >>> Instead of duplicating the code to wait until the server is ready and
> >>> remember to terminate the server and wait for it, make it possible to
> >>> use like this:
> >>>
> >>>   with qemu_nbd_popen('-k', sock, image):
> >>>   # Access image via qemu-nbd socket...
> >>>
> >>> Only test 264 used this helper, but I had to modify the output since it
> >>> did not log consistently when starting and stopping qemu-nbd.
> >>>
> >>> Signed-off-by: Nir Soffer 
> >>> ---
> >>>tests/qemu-iotests/264| 76 +--
> >>>tests/qemu-iotests/264.out|  2 +
> >>>tests/qemu-iotests/iotests.py | 28 -
> >>>3 files changed, 56 insertions(+), 50 deletions(-)
> >>>
> >>> diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
> >>> index 304a7443d7..666f164ed8 100755
> >>> --- a/tests/qemu-iotests/264
> >>> +++ b/tests/qemu-iotests/264
> >>> @@ -36,48 +36,32 @@ wait_step = 0.2
> >>>
> >>>qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
> >>>qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
> >>> -srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
> >>>
> >>> -# Wait for NBD server availability
> >>> -t = 0
> >>> -ok = False
> >>> -while t < wait_limit:
> >>> -ok = qemu_io_silent_check('-f', 'raw', '-c', 'read 0 512', nbd_uri)
> >>> -if ok:
> >>> -break
> >>> -time.sleep(wait_step)
> >>> -t += wait_step
> >>> +with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
> >>> +vm = iotests.VM().add_drive(disk_a)
> >>> +vm.launch()
> >>> +vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
> >>> +
> >>> +vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
> >>> +   **{'node_name': 'backup0',
> >>> +  'driver': 'raw',
> >>> +  'file': {'driver': 'nbd',
> >>> +   'server': {'type': 'unix', 'path': nbd_sock},
> >>> +   'reconnect-delay': 10}})
> >>> +vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
> >>> target='backup0',
> >>> +   speed=(1 * 1024 * 1024))
> >>> +
> >>> +# Wait for some progress
> >>> +t = 0
> >>> +while t < wait_limit:
> >>> +jobs = vm.qmp('query-block-jobs')['return']
> >>> +if jobs and jobs[0]['offset'] > 0:
> >>> +break
> >>> +time.sleep(wait_step)
> >>> +t += wait_step
> >>>
> >>> -assert ok
> >>> -
> >>> -vm = iotests.VM().add_drive(disk_a)
> >>> -vm.launch()
> >>> -vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
> >>> -
> >>> -vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
> >>> -   **{'node_name': 'backup0',
> >>> -  'driver': 'raw',
> >>> -  'file': {'driver': 'nbd',
> >>> -   'server': {'type': 'unix', 'path': nbd_sock},
> >>> -   'reconnect-delay': 10}})
> >>> -vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
> >>> target='backup0',
> >>> -   speed=(1 * 1024 * 1024))
> >>> -
> >>> -# Wait for some progress
> >>> -t = 0
> >>> -while t < wait_limit:
> >>> -jobs = vm.qmp('query-block-jobs')['return']
> >>>if jobs and jobs[0]['offset'] > 0:
> >>> -break
> >>> -time.sleep(wait_step)
> >>> -t += wait_step
> >>> -
> >>> -if jobs and jobs[0]['offset'] > 0:
> >>> -log('Backup job is started')
> >>> -
> >>> -log('Kill NBD server')
> >>> -srv.kill()
> >>> -srv.wait()
> >>> +log('Backup job is started')
> >>>
> >>>jobs = vm.qmp('query-block-jobs')['return']
> >>>if jobs and jobs[0]['offset'] < jobs[0]['len']:
> >>> @@ -88,12 +72,8 @@ vm.qmp_log('block-job-set-speed', device='drive0', 
> >>> speed=0)
> >>># Emulate server down time for 1 second
> >>>time.sleep(1)
> >>>
> >>> -log('Start NBD server')
> >>> -srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
> >>> -
> >>> -e = vm.event_wait('BLOCK_JOB_COMPLETED')
> >>> -log('Backup completed: {}'.format(e['data']['offset']))
> >>> -
> >>> -vm.qmp_log('blockdev-del', node_name='backup0')
> >>> -srv.kill()
> >>> -vm.shutdown()
> >>> +with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
> >>> +e = vm.event_wait('BLOCK_JOB_COMPLETED')
> >>> +log('Backup completed: {}'.format(e['data']['offset']))
> >>> +vm.qmp_log('blockdev-del', node_name='backup0')
> >>> +vm.shutdown()
> >>> diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
> >>> index 3000944b09..c45b1e81ef 100644
> >>> --- a/tests/qemu-iotests/264.out
> >>> +++ b/tests/qemu-iotests/264.out
> >>> @@ -1,3 +1,4 @@
> >>> +Start NBD server
> >>>{"execute": "blockdev-add", "arguments": {"driver": "raw", 

Re: [PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-08-05 Thread Vladimir Sementsov-Ogievskiy

28.07.2020 19:05, Nir Soffer wrote:

On Tue, Jul 28, 2020 at 4:43 PM Vladimir Sementsov-Ogievskiy
 wrote:


28.07.2020 00:58, Nir Soffer wrote:

Instead of duplicating the code to wait until the server is ready and
remember to terminate the server and wait for it, make it possible to
use like this:

  with qemu_nbd_popen('-k', sock, image):
  # Access image via qemu-nbd socket...

Only test 264 used this helper, but I had to modify the output since it
did not log consistently when starting and stopping qemu-nbd.

Signed-off-by: Nir Soffer 
---
   tests/qemu-iotests/264| 76 +--
   tests/qemu-iotests/264.out|  2 +
   tests/qemu-iotests/iotests.py | 28 -
   3 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 304a7443d7..666f164ed8 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -36,48 +36,32 @@ wait_step = 0.2

   qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
   qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)

-# Wait for NBD server availability
-t = 0
-ok = False
-while t < wait_limit:
-ok = qemu_io_silent_check('-f', 'raw', '-c', 'read 0 512', nbd_uri)
-if ok:
-break
-time.sleep(wait_step)
-t += wait_step
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+vm = iotests.VM().add_drive(disk_a)
+vm.launch()
+vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+
+vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
+   **{'node_name': 'backup0',
+  'driver': 'raw',
+  'file': {'driver': 'nbd',
+   'server': {'type': 'unix', 'path': nbd_sock},
+   'reconnect-delay': 10}})
+vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
target='backup0',
+   speed=(1 * 1024 * 1024))
+
+# Wait for some progress
+t = 0
+while t < wait_limit:
+jobs = vm.qmp('query-block-jobs')['return']
+if jobs and jobs[0]['offset'] > 0:
+break
+time.sleep(wait_step)
+t += wait_step

-assert ok
-
-vm = iotests.VM().add_drive(disk_a)
-vm.launch()
-vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
-
-vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
-   **{'node_name': 'backup0',
-  'driver': 'raw',
-  'file': {'driver': 'nbd',
-   'server': {'type': 'unix', 'path': nbd_sock},
-   'reconnect-delay': 10}})
-vm.qmp_log('blockdev-backup', device='drive0', sync='full', target='backup0',
-   speed=(1 * 1024 * 1024))
-
-# Wait for some progress
-t = 0
-while t < wait_limit:
-jobs = vm.qmp('query-block-jobs')['return']
   if jobs and jobs[0]['offset'] > 0:
-break
-time.sleep(wait_step)
-t += wait_step
-
-if jobs and jobs[0]['offset'] > 0:
-log('Backup job is started')
-
-log('Kill NBD server')
-srv.kill()
-srv.wait()
+log('Backup job is started')

   jobs = vm.qmp('query-block-jobs')['return']
   if jobs and jobs[0]['offset'] < jobs[0]['len']:
@@ -88,12 +72,8 @@ vm.qmp_log('block-job-set-speed', device='drive0', speed=0)
   # Emulate server down time for 1 second
   time.sleep(1)

-log('Start NBD server')
-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
-
-e = vm.event_wait('BLOCK_JOB_COMPLETED')
-log('Backup completed: {}'.format(e['data']['offset']))
-
-vm.qmp_log('blockdev-del', node_name='backup0')
-srv.kill()
-vm.shutdown()
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+e = vm.event_wait('BLOCK_JOB_COMPLETED')
+log('Backup completed: {}'.format(e['data']['offset']))
+vm.qmp_log('blockdev-del', node_name='backup0')
+vm.shutdown()
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index 3000944b09..c45b1e81ef 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,3 +1,4 @@
+Start NBD server
   {"execute": "blockdev-add", "arguments": {"driver": "raw", "file": {"driver": "nbd", "reconnect-delay": 10, "server": {"path": 
"TEST_DIR/PID-nbd-sock", "type": "unix"}}, "node-name": "backup0"}}
   {"return": {}}
   {"execute": "blockdev-backup", "arguments": {"device": "drive0", "speed": 1048576, "sync": 
"full", "target": "backup0"}}
@@ -11,3 +12,4 @@ Start NBD server
   Backup completed: 5242880
   {"execute": "blockdev-del", "arguments": {"node-name": "backup0"}}
   {"return": {}}
+Kill NBD server
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 3590ed78a0..8f79668435 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -28,10 +28,13 @@ import signal
   import struct
   import subprocess
   import sys
+import time
   from typing import (Any, Callable, Dict, Iterable,
   

Re: [PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-07-28 Thread Nir Soffer
On Tue, Jul 28, 2020 at 4:43 PM Vladimir Sementsov-Ogievskiy
 wrote:
>
> 28.07.2020 00:58, Nir Soffer wrote:
> > Instead of duplicating the code to wait until the server is ready and
> > remember to terminate the server and wait for it, make it possible to
> > use like this:
> >
> >  with qemu_nbd_popen('-k', sock, image):
> >  # Access image via qemu-nbd socket...
> >
> > Only test 264 used this helper, but I had to modify the output since it
> > did not log consistently when starting and stopping qemu-nbd.
> >
> > Signed-off-by: Nir Soffer 
> > ---
> >   tests/qemu-iotests/264| 76 +--
> >   tests/qemu-iotests/264.out|  2 +
> >   tests/qemu-iotests/iotests.py | 28 -
> >   3 files changed, 56 insertions(+), 50 deletions(-)
> >
> > diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
> > index 304a7443d7..666f164ed8 100755
> > --- a/tests/qemu-iotests/264
> > +++ b/tests/qemu-iotests/264
> > @@ -36,48 +36,32 @@ wait_step = 0.2
> >
> >   qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
> >   qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
> > -srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
> >
> > -# Wait for NBD server availability
> > -t = 0
> > -ok = False
> > -while t < wait_limit:
> > -ok = qemu_io_silent_check('-f', 'raw', '-c', 'read 0 512', nbd_uri)
> > -if ok:
> > -break
> > -time.sleep(wait_step)
> > -t += wait_step
> > +with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
> > +vm = iotests.VM().add_drive(disk_a)
> > +vm.launch()
> > +vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
> > +
> > +vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
> > +   **{'node_name': 'backup0',
> > +  'driver': 'raw',
> > +  'file': {'driver': 'nbd',
> > +   'server': {'type': 'unix', 'path': nbd_sock},
> > +   'reconnect-delay': 10}})
> > +vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
> > target='backup0',
> > +   speed=(1 * 1024 * 1024))
> > +
> > +# Wait for some progress
> > +t = 0
> > +while t < wait_limit:
> > +jobs = vm.qmp('query-block-jobs')['return']
> > +if jobs and jobs[0]['offset'] > 0:
> > +break
> > +time.sleep(wait_step)
> > +t += wait_step
> >
> > -assert ok
> > -
> > -vm = iotests.VM().add_drive(disk_a)
> > -vm.launch()
> > -vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
> > -
> > -vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
> > -   **{'node_name': 'backup0',
> > -  'driver': 'raw',
> > -  'file': {'driver': 'nbd',
> > -   'server': {'type': 'unix', 'path': nbd_sock},
> > -   'reconnect-delay': 10}})
> > -vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
> > target='backup0',
> > -   speed=(1 * 1024 * 1024))
> > -
> > -# Wait for some progress
> > -t = 0
> > -while t < wait_limit:
> > -jobs = vm.qmp('query-block-jobs')['return']
> >   if jobs and jobs[0]['offset'] > 0:
> > -break
> > -time.sleep(wait_step)
> > -t += wait_step
> > -
> > -if jobs and jobs[0]['offset'] > 0:
> > -log('Backup job is started')
> > -
> > -log('Kill NBD server')
> > -srv.kill()
> > -srv.wait()
> > +log('Backup job is started')
> >
> >   jobs = vm.qmp('query-block-jobs')['return']
> >   if jobs and jobs[0]['offset'] < jobs[0]['len']:
> > @@ -88,12 +72,8 @@ vm.qmp_log('block-job-set-speed', device='drive0', 
> > speed=0)
> >   # Emulate server down time for 1 second
> >   time.sleep(1)
> >
> > -log('Start NBD server')
> > -srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
> > -
> > -e = vm.event_wait('BLOCK_JOB_COMPLETED')
> > -log('Backup completed: {}'.format(e['data']['offset']))
> > -
> > -vm.qmp_log('blockdev-del', node_name='backup0')
> > -srv.kill()
> > -vm.shutdown()
> > +with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
> > +e = vm.event_wait('BLOCK_JOB_COMPLETED')
> > +log('Backup completed: {}'.format(e['data']['offset']))
> > +vm.qmp_log('blockdev-del', node_name='backup0')
> > +vm.shutdown()
> > diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
> > index 3000944b09..c45b1e81ef 100644
> > --- a/tests/qemu-iotests/264.out
> > +++ b/tests/qemu-iotests/264.out
> > @@ -1,3 +1,4 @@
> > +Start NBD server
> >   {"execute": "blockdev-add", "arguments": {"driver": "raw", "file": 
> > {"driver": "nbd", "reconnect-delay": 10, "server": {"path": 
> > "TEST_DIR/PID-nbd-sock", "type": "unix"}}, "node-name": "backup0"}}
> >   {"return": {}}
> >   {"execute": "blockdev-backup", "arguments": {"device": "drive0", "speed": 
> > 1048576, "sync": "full", "target": "backup0"}}
> > @@ -11,3 +12,4 @@ Start NBD server
> >   Backup completed: 5242880
> >   

Re: [PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-07-28 Thread Eric Blake

On 7/28/20 8:42 AM, Vladimir Sementsov-Ogievskiy wrote:

28.07.2020 00:58, Nir Soffer wrote:

Instead of duplicating the code to wait until the server is ready and
remember to terminate the server and wait for it, make it possible to
use like this:

 with qemu_nbd_popen('-k', sock, image):
 # Access image via qemu-nbd socket...

Only test 264 used this helper, but I had to modify the output since it
did not log consistently when starting and stopping qemu-nbd.

Signed-off-by: Nir Soffer 
---



+++ b/tests/qemu-iotests/iotests.py
@@ -28,10 +28,13 @@ import signal
  import struct
  import subprocess
  import sys
+import time
  from typing import (Any, Callable, Dict, Iterable,
  List, Optional, Sequence, Tuple, TypeVar)
  import unittest
+from contextlib import contextmanager
+
  # pylint: disable=import-error, wrong-import-position
  sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 
'python'))

  from qemu import qtest
@@ -270,9 +273,30 @@ def qemu_nbd_early_pipe(*args):
  return subp.returncode, output if subp.returncode else ''
+@contextmanager
  def qemu_nbd_popen(*args):
-    '''Run qemu-nbd in daemon mode and return the parent's exit code'''
-    return subprocess.Popen(qemu_nbd_args + ['--persistent'] + 
list(args))

+    '''Context manager running qemu-nbd within the context'''


PEP8 (or some another PEP referenced in PEP8) asks to use """ for 
doc-strings


But at least './check 297' still passes, so we are consistent enough 
with our current set of syntax checks to be acceptable.





+    pid_file = file_path("pid")
+
+    cmd = list(qemu_nbd_args)
+    cmd.extend(('--persistent', '--pid-file', pid_file))
+    cmd.extend(args)
+
+    log('Start NBD server')
+    p = subprocess.Popen(cmd)
+    try:
+    while not os.path.exists(pid_file):
+    if p.poll() is not None:
+    raise RuntimeError(
+    "qemu-nbd terminated with exit code {}: {}"
+    .format(p.returncode, ' '.join(cmd)))
+
+    time.sleep(0.01)
+    yield
+    finally:
+    log('Kill NBD server')
+    p.kill()
+    p.wait()


why do we need try-finally? I think, the only possible exception is your 
"raise RuntimeError", and in this case the process is already dead, no 
need to kill it (and print the log message)



  def compare_images(img1, img2, fmt1=imgfmt, fmt2=imgfmt):
  '''Return True if two image files are identical'''



anyway:
Reviewed-by: Vladimir Sementsov-Ogievskiy 



Thanks; I trust your python review more than mine.  Queuing for -rc2.

--
Eric Blake, Principal Software Engineer
Red Hat, Inc.   +1-919-301-3226
Virtualization:  qemu.org | libvirt.org




Re: [PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-07-28 Thread Vladimir Sementsov-Ogievskiy

28.07.2020 00:58, Nir Soffer wrote:

Instead of duplicating the code to wait until the server is ready and
remember to terminate the server and wait for it, make it possible to
use like this:

 with qemu_nbd_popen('-k', sock, image):
 # Access image via qemu-nbd socket...

Only test 264 used this helper, but I had to modify the output since it
did not log consistently when starting and stopping qemu-nbd.

Signed-off-by: Nir Soffer 
---
  tests/qemu-iotests/264| 76 +--
  tests/qemu-iotests/264.out|  2 +
  tests/qemu-iotests/iotests.py | 28 -
  3 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 304a7443d7..666f164ed8 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -36,48 +36,32 @@ wait_step = 0.2
  
  qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))

  qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
  
-# Wait for NBD server availability

-t = 0
-ok = False
-while t < wait_limit:
-ok = qemu_io_silent_check('-f', 'raw', '-c', 'read 0 512', nbd_uri)
-if ok:
-break
-time.sleep(wait_step)
-t += wait_step
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+vm = iotests.VM().add_drive(disk_a)
+vm.launch()
+vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+
+vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
+   **{'node_name': 'backup0',
+  'driver': 'raw',
+  'file': {'driver': 'nbd',
+   'server': {'type': 'unix', 'path': nbd_sock},
+   'reconnect-delay': 10}})
+vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
target='backup0',
+   speed=(1 * 1024 * 1024))
+
+# Wait for some progress
+t = 0
+while t < wait_limit:
+jobs = vm.qmp('query-block-jobs')['return']
+if jobs and jobs[0]['offset'] > 0:
+break
+time.sleep(wait_step)
+t += wait_step
  
-assert ok

-
-vm = iotests.VM().add_drive(disk_a)
-vm.launch()
-vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
-
-vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
-   **{'node_name': 'backup0',
-  'driver': 'raw',
-  'file': {'driver': 'nbd',
-   'server': {'type': 'unix', 'path': nbd_sock},
-   'reconnect-delay': 10}})
-vm.qmp_log('blockdev-backup', device='drive0', sync='full', target='backup0',
-   speed=(1 * 1024 * 1024))
-
-# Wait for some progress
-t = 0
-while t < wait_limit:
-jobs = vm.qmp('query-block-jobs')['return']
  if jobs and jobs[0]['offset'] > 0:
-break
-time.sleep(wait_step)
-t += wait_step
-
-if jobs and jobs[0]['offset'] > 0:
-log('Backup job is started')
-
-log('Kill NBD server')
-srv.kill()
-srv.wait()
+log('Backup job is started')
  
  jobs = vm.qmp('query-block-jobs')['return']

  if jobs and jobs[0]['offset'] < jobs[0]['len']:
@@ -88,12 +72,8 @@ vm.qmp_log('block-job-set-speed', device='drive0', speed=0)
  # Emulate server down time for 1 second
  time.sleep(1)
  
-log('Start NBD server')

-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
-
-e = vm.event_wait('BLOCK_JOB_COMPLETED')
-log('Backup completed: {}'.format(e['data']['offset']))
-
-vm.qmp_log('blockdev-del', node_name='backup0')
-srv.kill()
-vm.shutdown()
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+e = vm.event_wait('BLOCK_JOB_COMPLETED')
+log('Backup completed: {}'.format(e['data']['offset']))
+vm.qmp_log('blockdev-del', node_name='backup0')
+vm.shutdown()
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index 3000944b09..c45b1e81ef 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,3 +1,4 @@
+Start NBD server
  {"execute": "blockdev-add", "arguments": {"driver": "raw", "file": {"driver": "nbd", "reconnect-delay": 10, "server": {"path": 
"TEST_DIR/PID-nbd-sock", "type": "unix"}}, "node-name": "backup0"}}
  {"return": {}}
  {"execute": "blockdev-backup", "arguments": {"device": "drive0", "speed": 1048576, "sync": 
"full", "target": "backup0"}}
@@ -11,3 +12,4 @@ Start NBD server
  Backup completed: 5242880
  {"execute": "blockdev-del", "arguments": {"node-name": "backup0"}}
  {"return": {}}
+Kill NBD server
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 3590ed78a0..8f79668435 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -28,10 +28,13 @@ import signal
  import struct
  import subprocess
  import sys
+import time
  from typing import (Any, Callable, Dict, Iterable,
  List, Optional, Sequence, Tuple, TypeVar)
  import unittest
  
+from contextlib import contextmanager

+
  # pylint: 

[PATCH v2 2/4] iotests: Make qemu_nbd_popen() a contextmanager

2020-07-27 Thread Nir Soffer
Instead of duplicating the code to wait until the server is ready and
remember to terminate the server and wait for it, make it possible to
use like this:

with qemu_nbd_popen('-k', sock, image):
# Access image via qemu-nbd socket...

Only test 264 used this helper, but I had to modify the output since it
did not log consistently when starting and stopping qemu-nbd.

Signed-off-by: Nir Soffer 
---
 tests/qemu-iotests/264| 76 +--
 tests/qemu-iotests/264.out|  2 +
 tests/qemu-iotests/iotests.py | 28 -
 3 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 304a7443d7..666f164ed8 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -36,48 +36,32 @@ wait_step = 0.2
 
 qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
 qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
 
-# Wait for NBD server availability
-t = 0
-ok = False
-while t < wait_limit:
-ok = qemu_io_silent_check('-f', 'raw', '-c', 'read 0 512', nbd_uri)
-if ok:
-break
-time.sleep(wait_step)
-t += wait_step
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+vm = iotests.VM().add_drive(disk_a)
+vm.launch()
+vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+
+vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
+   **{'node_name': 'backup0',
+  'driver': 'raw',
+  'file': {'driver': 'nbd',
+   'server': {'type': 'unix', 'path': nbd_sock},
+   'reconnect-delay': 10}})
+vm.qmp_log('blockdev-backup', device='drive0', sync='full', 
target='backup0',
+   speed=(1 * 1024 * 1024))
+
+# Wait for some progress
+t = 0
+while t < wait_limit:
+jobs = vm.qmp('query-block-jobs')['return']
+if jobs and jobs[0]['offset'] > 0:
+break
+time.sleep(wait_step)
+t += wait_step
 
-assert ok
-
-vm = iotests.VM().add_drive(disk_a)
-vm.launch()
-vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
-
-vm.qmp_log('blockdev-add', filters=[iotests.filter_qmp_testfiles],
-   **{'node_name': 'backup0',
-  'driver': 'raw',
-  'file': {'driver': 'nbd',
-   'server': {'type': 'unix', 'path': nbd_sock},
-   'reconnect-delay': 10}})
-vm.qmp_log('blockdev-backup', device='drive0', sync='full', target='backup0',
-   speed=(1 * 1024 * 1024))
-
-# Wait for some progress
-t = 0
-while t < wait_limit:
-jobs = vm.qmp('query-block-jobs')['return']
 if jobs and jobs[0]['offset'] > 0:
-break
-time.sleep(wait_step)
-t += wait_step
-
-if jobs and jobs[0]['offset'] > 0:
-log('Backup job is started')
-
-log('Kill NBD server')
-srv.kill()
-srv.wait()
+log('Backup job is started')
 
 jobs = vm.qmp('query-block-jobs')['return']
 if jobs and jobs[0]['offset'] < jobs[0]['len']:
@@ -88,12 +72,8 @@ vm.qmp_log('block-job-set-speed', device='drive0', speed=0)
 # Emulate server down time for 1 second
 time.sleep(1)
 
-log('Start NBD server')
-srv = qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b)
-
-e = vm.event_wait('BLOCK_JOB_COMPLETED')
-log('Backup completed: {}'.format(e['data']['offset']))
-
-vm.qmp_log('blockdev-del', node_name='backup0')
-srv.kill()
-vm.shutdown()
+with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
+e = vm.event_wait('BLOCK_JOB_COMPLETED')
+log('Backup completed: {}'.format(e['data']['offset']))
+vm.qmp_log('blockdev-del', node_name='backup0')
+vm.shutdown()
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index 3000944b09..c45b1e81ef 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,3 +1,4 @@
+Start NBD server
 {"execute": "blockdev-add", "arguments": {"driver": "raw", "file": {"driver": 
"nbd", "reconnect-delay": 10, "server": {"path": "TEST_DIR/PID-nbd-sock", 
"type": "unix"}}, "node-name": "backup0"}}
 {"return": {}}
 {"execute": "blockdev-backup", "arguments": {"device": "drive0", "speed": 
1048576, "sync": "full", "target": "backup0"}}
@@ -11,3 +12,4 @@ Start NBD server
 Backup completed: 5242880
 {"execute": "blockdev-del", "arguments": {"node-name": "backup0"}}
 {"return": {}}
+Kill NBD server
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 3590ed78a0..8f79668435 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -28,10 +28,13 @@ import signal
 import struct
 import subprocess
 import sys
+import time
 from typing import (Any, Callable, Dict, Iterable,
 List, Optional, Sequence, Tuple, TypeVar)
 import unittest
 
+from contextlib import contextmanager
+
 # pylint: disable=import-error, wrong-import-position