Paolo Bonzini <pbonz...@redhat.com> wrote:
> The recent patches to use vectored I/O for RAM migration caused a
> regression in savevm speed.  To restore previous performance,
> add data to the buffer in qemu_put_buffer_async whenever writev_buffer
> is not available in the QEMUFile.
>
> Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
> ---
>  savevm.c | 49 ++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 34 insertions(+), 15 deletions(-)
>
> diff --git a/savevm.c b/savevm.c
> index c952c41..b32e0a7 100644
> --- a/savevm.c
> +++ b/savevm.c
> @@ -525,27 +525,24 @@ static void qemu_file_set_error(QEMUFile *f, int ret)
>  static void qemu_fflush(QEMUFile *f)
>  {
>      ssize_t ret = 0;
> -    int i = 0;
>  
>      if (!f->ops->writev_buffer && !f->ops->put_buffer) {
>          return;
>      }
>  
> -    if (f->is_write && f->iovcnt > 0) {
> +    if (f->is_write) {
>          if (f->ops->writev_buffer) {
> -            ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt);
> -            if (ret >= 0) {
> -                f->pos += ret;

The code was there before, and it is a matter of style, but adding 0 to
an amount is a NOP.

> +            if (f->iovcnt > 0) {
> +                ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt);
>              }
>          } else {
> -            for (i = 0; i < f->iovcnt && ret >= 0; i++) {
> -                ret = f->ops->put_buffer(f->opaque, f->iov[i].iov_base, f->pos,
> -                                         f->iov[i].iov_len);
> -                if (ret >= 0) {
> -                    f->pos += ret;
> -                }
> +            if (f->buf_index > 0) {
> +                ret = f->ops->put_buffer(f->opaque, f->buf, f->pos, f->buf_index);
>              }
>          }
> +        if (ret >= 0) {
> +            f->pos += ret;
> +        }
>          f->buf_index = 0;
>          f->iovcnt = 0;
>      }
> @@ -640,6 +637,11 @@ static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
>  
>  void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
>  {
> +    if (!f->ops->writev_buffer) {
> +        qemu_put_buffer(f, buf, size);
> +        return;
> +    }
> +

This

>      if (f->last_error) {
>          return;
>      }
> @@ -673,8 +675,17 @@ void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size)
>          if (l > size)
>              l = size;
>          memcpy(f->buf + f->buf_index, buf, l);
> -        f->buf_index += l;
> -        qemu_put_buffer_async(f, f->buf + (f->buf_index - l), l);
> +        f->bytes_xfer += size;
> +        if (f->ops->writev_buffer) {
> +            add_to_iovec(f, f->buf + f->buf_index, l);
> +            f->buf_index += l;
> +        } else {
> +            f->is_write = 1;
> +            f->buf_index += l;
> +            if (f->buf_index == IO_BUF_SIZE) {
> +                qemu_fflush(f);
> +            }
> +        }

And this

>          if (qemu_file_get_error(f)) {
>              break;
>          }
> @@ -697,8 +708,16 @@ void qemu_put_byte(QEMUFile *f, int v)
>  
>      f->buf[f->buf_index] = v;
>      f->bytes_xfer++;
> -    add_to_iovec(f, f->buf + f->buf_index, 1);
> -    f->buf_index++;
> +    if (f->ops->writev_buffer) {
> +        add_to_iovec(f, f->buf + f->buf_index, 1);
> +        f->buf_index++;
> +    } else {
> +        f->is_write = 1;
> +        f->buf_index++;
> +        if (f->buf_index == IO_BUF_SIZE) {
> +            qemu_fflush(f);
> +        }
> +    }

And this



is not needed if we just put the code inside add_to_iovec(), no?

Users of add_to_iovec() should not care whether we are using writev or
put_buffer(), right?
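
I am thinking of something like this (untested; add_buf_to_iovec is
just a strawman name).  It probably has to be a thin wrapper rather
than add_to_iovec() itself, because qemu_put_buffer_async() passes the
caller's buffer, not f->buf:

    static void add_buf_to_iovec(QEMUFile *f, int len)
    {
        if (f->ops->writev_buffer) {
            /* vectored backend: just record where the bytes live */
            add_to_iovec(f, f->buf + f->buf_index, len);
        }
        f->is_write = 1;
        f->buf_index += len;
        if (f->buf_index == IO_BUF_SIZE) {
            /* internal buffer full: flush through whichever backend exists */
            qemu_fflush(f);
        }
    }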

And as a second note: having qemu_put_buffer_async() call
qemu_put_buffer() and qemu_put_buffer() call qemu_put_buffer_async()
is... weird, no?
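
With a helper like the one sketched above, the byte path stops
branching on the backend entirely, e.g. (again untested):

    void qemu_put_byte(QEMUFile *f, int v)
    {
        if (f->last_error) {
            return;
        }
        f->buf[f->buf_index] = v;
        f->bytes_xfer++;
        add_buf_to_iovec(f, 1);
    }

qemu_put_buffer() can shrink the same way, leaving only the one-way
fallback from qemu_put_buffer_async() into qemu_put_buffer().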

>  }
>  
>  static void qemu_file_skip(QEMUFile *f, int size)
