On Thu, 20 Jun 2019 20:07:36 +0530 Kirti Wankhede <kwankh...@nvidia.com> wrote:
> Added .save_live_pending, .save_live_iterate and .save_live_complete_precopy
> functions. These functions handle the pre-copy and stop-and-copy phases.
>
> In _SAVING|_RUNNING device state or pre-copy phase:
> - read pending_bytes
> - read data_offset - indicates to the kernel driver to write data to the
>   staging buffer which is mmapped.

Why is data_offset the trigger rather than data_size?  It seems that
data_offset can't really change dynamically since it might be mmap'd,
so it seems unnatural to bother re-reading it.

> - read data_size - amount of data in bytes written by the vendor driver
>   in the migration region.
> - if data section is trapped, pread() number of bytes in data_size, from
>   data_offset.
> - if data section is mmaped, read mmaped buffer of size data_size.
> - Write data packet to file stream as below:
>   {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data,
>    VFIO_MIG_FLAG_END_OF_STATE}
>
> In _SAVING device state or stop-and-copy phase:
> a. read config space of device and save to migration file stream. This
>    doesn't need to be from the vendor driver. Any other special config
>    state from the driver can be saved as data in a following iteration.
> b. read pending_bytes - indicates to the kernel driver to write data to
>    the staging buffer which is mmapped.

Is it pending_bytes or data_offset that triggers the write out of
data?  Why pending_bytes vs data_size?  I was interpreting
pending_bytes as the total data size while data_size is the size
available to read now, so assumed data_size would be more closely
aligned to making the data available.

> c. read data_size - amount of data in bytes written by the vendor driver
>    in the migration region.
> d. if data section is trapped, pread() from data_offset of size data_size.
> e. if data section is mmaped, read mmaped buffer of size data_size.

Should this read as "pread() from data_offset of data_size, or
optionally if mmap is supported on the data area, read data_size from
start of mapped buffer"?  IOW, pread should always work.  Same in
previous section.

> f. Write data packet as below:
>    {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data}
> g. iterate through steps b to f until (pending_bytes > 0)

s/until/while/

> h. Write {VFIO_MIG_FLAG_END_OF_STATE}
>
> .save_live_iterate runs outside the iothread lock in the migration case,
> which could race with an asynchronous call to get the dirty page list,
> causing data corruption in the mapped migration region. A mutex is added
> here to serialize the migration buffer read operation.

Would we be ahead to use different offsets within the region for device
data vs dirty bitmap to avoid this?
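For reference, here is the read sequence from the commit message reduced
to a standalone sketch.  Everything below is illustrative rather than the
actual QEMU code: the struct only mirrors the migration region header
proposed in this series (field order approximate, the UAPI patch is
authoritative), read_field()/save_one_chunk() are made-up helpers, and
region_base stands in for region->fd_offset.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

/* Mirrors the migration region header proposed in this series
 * (abbreviated; field order here is illustrative). */
struct vfio_device_migration_info {
    uint32_t device_state;
    uint32_t reserved;
    uint64_t pending_bytes;
    uint64_t data_offset;
    uint64_t data_size;
};

#define MIG_FIELD(f) ((off_t)offsetof(struct vfio_device_migration_info, f))

static int read_field(int fd, off_t region_base, off_t field, uint64_t *val)
{
    ssize_t n = pread(fd, val, sizeof(*val), region_base + field);

    return n == (ssize_t)sizeof(*val) ? 0 : -EINVAL;
}

/*
 * One pre-copy iteration: stage a chunk, learn its size, copy it out.
 * Returns bytes copied into 'out', 0 when nothing is pending, or -errno.
 */
static ssize_t save_one_chunk(int fd, off_t region_base, void *out, size_t cap)
{
    uint64_t data_offset, data_size;

    /* Per the commit message, reading data_offset is what asks the
     * vendor driver to stage the next chunk in the data area. */
    if (read_field(fd, region_base, MIG_FIELD(data_offset), &data_offset) ||
        read_field(fd, region_base, MIG_FIELD(data_size), &data_size)) {
        return -EINVAL;
    }
    if (data_size == 0) {
        return 0;               /* vendor driver has nothing pending */
    }
    if (data_size > cap) {
        return -ENOSPC;
    }
    /* Trapped path; an mmap'd data area would just be a faster read. */
    if (pread(fd, out, data_size, region_base + data_offset) !=
        (ssize_t)data_size) {
        return -EINVAL;
    }
    return (ssize_t)data_size;
}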
> Signed-off-by: Kirti Wankhede <kwankh...@nvidia.com>
> Reviewed-by: Neo Jia <c...@nvidia.com>
> ---
>  hw/vfio/migration.c | 212 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 212 insertions(+)
>
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index fe0887c27664..0a2f30872316 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -107,6 +107,111 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t state)
>      return 0;
>  }
>
> +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev)
> +{
> +    VFIOMigration *migration = vbasedev->migration;
> +    VFIORegion *region = &migration->region.buffer;
> +    uint64_t data_offset = 0, data_size = 0;
> +    int ret;
> +
> +    ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset),
> +                region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                             data_offset));
> +    if (ret != sizeof(data_offset)) {
> +        error_report("Failed to get migration buffer data offset %d",
> +                     ret);
> +        return -EINVAL;
> +    }
> +
> +    ret = pread(vbasedev->fd, &data_size, sizeof(data_size),
> +                region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                             data_size));
> +    if (ret != sizeof(data_size)) {
> +        error_report("Failed to get migration buffer data size %d",
> +                     ret);
> +        return -EINVAL;
> +    }
> +
> +    if (data_size > 0) {
> +        void *buf = NULL;
> +        bool buffer_mmaped = false;
> +
> +        if (region->mmaps) {
> +            int i;
> +
> +            for (i = 0; i < region->nr_mmaps; i++) {
> +                if ((data_offset >= region->mmaps[i].offset) &&
> +                    (data_offset < region->mmaps[i].offset +
> +                                   region->mmaps[i].size)) {
> +                    buf = region->mmaps[i].mmap + (data_offset -
> +                                                   region->mmaps[i].offset);

So you're expecting that data_offset is somewhere within the data
area.  Why doesn't the data always simply start at the beginning of
the data area?  ie. data_offset would coincide with the beginning of
the mmap'able area (if supported) and be static.  Does this enable
some functionality in the vendor driver?  Does resume data need to be
written from the same offset where it's read?

> +                    buffer_mmaped = true;
> +                    break;
> +                }
> +            }
> +        }
> +
> +        if (!buffer_mmaped) {
> +            buf = g_malloc0(data_size);
> +            ret = pread(vbasedev->fd, buf, data_size,
> +                        region->fd_offset + data_offset);
> +            if (ret != data_size) {
> +                error_report("Failed to get migration data %d", ret);
> +                g_free(buf);
> +                return -EINVAL;
> +            }
> +        }
> +
> +        qemu_put_be64(f, data_size);
> +        qemu_put_buffer(f, buf, data_size);
> +
> +        if (!buffer_mmaped) {
> +            g_free(buf);
> +        }
> +        migration->pending_bytes -= data_size;
> +    } else {
> +        qemu_put_be64(f, data_size);
> +    }
> +
> +    ret = qemu_file_get_error(f);
> +
> +    return data_size;
> +}
> +
> +static int vfio_update_pending(VFIODevice *vbasedev)
> +{
> +    VFIOMigration *migration = vbasedev->migration;
> +    VFIORegion *region = &migration->region.buffer;
> +    uint64_t pending_bytes = 0;
> +    int ret;
> +
> +    ret = pread(vbasedev->fd, &pending_bytes, sizeof(pending_bytes),
> +                region->fd_offset + offsetof(struct vfio_device_migration_info,
> +                                             pending_bytes));

Did this trigger the vendor driver to write out to the data area when
we don't need it to?

> +    if ((ret < 0) || (ret != sizeof(pending_bytes))) {
> +        error_report("Failed to get pending bytes %d", ret);
> +        migration->pending_bytes = 0;
> +        return (ret < 0) ? ret : -EINVAL;
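To sketch what a static data_offset would look like (a hypothetical
simplification, not what this patch does; mmap_buf stands in for a
single mmap'able data area and is NULL when the vendor driver doesn't
support mmap):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical simplification: data_offset is fixed at the start of
 * the data area, so no per-chunk mmap lookup is needed.  pread() must
 * work regardless; the mmap'd pointer is only a fast path.
 */
static int copy_chunk(int fd, off_t region_base, off_t data_offset,
                      const void *mmap_buf, void *dst, uint64_t data_size)
{
    if (mmap_buf) {
        memcpy(dst, mmap_buf, data_size);   /* mmap fast path */
        return 0;
    }
    return pread(fd, dst, data_size, region_base + data_offset) ==
           (ssize_t)data_size ? 0 : -EINVAL;
}

The mmap lookup loop in vfio_save_buffer() then disappears entirely,
and pread() remains the mandatory fallback either way.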
> +    }
> +
> +    migration->pending_bytes = pending_bytes;
> +    return 0;
> +}
> +
> +static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
> +{
> +    VFIODevice *vbasedev = opaque;
> +
> +    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);
> +
> +    if (vbasedev->type == VFIO_DEVICE_TYPE_PCI) {
> +        vfio_pci_save_config(vbasedev, f);
> +    }
> +    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> +
> +    return qemu_file_get_error(f);
> +}
> +
>  /* ---------------------------------------------------------------------- */
>
>  static int vfio_save_setup(QEMUFile *f, void *opaque)
> @@ -163,9 +268,116 @@ static void vfio_save_cleanup(void *opaque)
>      }
>  }
>
> +static void vfio_save_pending(QEMUFile *f, void *opaque,
> +                              uint64_t threshold_size,
> +                              uint64_t *res_precopy_only,
> +                              uint64_t *res_compatible,
> +                              uint64_t *res_postcopy_only)
> +{
> +    VFIODevice *vbasedev = opaque;
> +    VFIOMigration *migration = vbasedev->migration;
> +    int ret;
> +
> +    ret = vfio_update_pending(vbasedev);
> +    if (ret) {
> +        return;
> +    }
> +
> +    if (vbasedev->device_state & VFIO_DEVICE_STATE_RUNNING) {
> +        *res_precopy_only += migration->pending_bytes;
> +    } else {
> +        *res_postcopy_only += migration->pending_bytes;
> +    }
> +    *res_compatible += 0;
> +}
> +
> +static int vfio_save_iterate(QEMUFile *f, void *opaque)
> +{
> +    VFIODevice *vbasedev = opaque;
> +    VFIOMigration *migration = vbasedev->migration;
> +    int ret;
> +
> +    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
> +
> +    qemu_mutex_lock(&migration->lock);
> +    ret = vfio_save_buffer(f, vbasedev);
> +    qemu_mutex_unlock(&migration->lock);
> +
> +    if (ret < 0) {
> +        error_report("vfio_save_buffer failed %s",
> +                     strerror(errno));
> +        return ret;
> +    }
> +
> +    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> +
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    return ret;
> +}
> +
> +static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
> +{
> +    VFIODevice *vbasedev = opaque;
> +    VFIOMigration *migration = vbasedev->migration;
> +    int ret;
> +
> +    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_SAVING);
> +    if (ret) {
> +        error_report("Failed to set state STOP and SAVING");
> +        return ret;
> +    }
> +
> +    ret = vfio_save_device_config_state(f, opaque);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    ret = vfio_update_pending(vbasedev);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    while (migration->pending_bytes > 0) {
> +        qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
> +        ret = vfio_save_buffer(f, vbasedev);
> +        if (ret < 0) {
> +            error_report("Failed to save buffer");
> +            return ret;
> +        } else if (ret == 0) {
> +            break;
> +        }
> +
> +        ret = vfio_update_pending(vbasedev);
> +        if (ret) {
> +            return ret;
> +        }
> +    }
> +
> +    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> +
> +    ret = qemu_file_get_error(f);
> +    if (ret) {
> +        return ret;
> +    }
> +
> +    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOPPED);
> +    if (ret) {
> +        error_report("Failed to set state STOPPED");
> +        return ret;
> +    }
> +    return ret;
> +}
> +
>  static SaveVMHandlers savevm_vfio_handlers = {
>      .save_setup = vfio_save_setup,
>      .save_cleanup = vfio_save_cleanup,
> +    .save_live_pending = vfio_save_pending,
> +    .save_live_iterate = vfio_save_iterate,
> +    .save_live_complete_precopy = vfio_save_complete_precopy,
>  };
>
>  /* ---------------------------------------------------------------------- */
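For completeness, a destination would consume the framing produced by
.save_live_iterate roughly as below.  This is only an illustration:
get_be64()/skip_bytes() stand in for qemu_get_be64()/qemu_get_buffer(),
and the two flags are declared extern purely so the sketch is
self-contained (in the series they are macros defined in an earlier
patch).

#include <errno.h>
#include <stdint.h>

/* Stand-ins; the real code uses qemu_get_be64()/qemu_get_buffer() and
 * the series' VFIO_MIG_FLAG_* macro definitions. */
extern uint64_t get_be64(void);
extern void skip_bytes(uint64_t len);
extern const uint64_t VFIO_MIG_FLAG_DEV_DATA_STATE;
extern const uint64_t VFIO_MIG_FLAG_END_OF_STATE;

/*
 * One .save_live_iterate record on the wire:
 *   DEV_DATA_STATE, data_size, [data_size bytes], END_OF_STATE
 * A data_size of 0 means the vendor driver had nothing pending.
 */
static int load_one_iterate_record(void)
{
    if (get_be64() != VFIO_MIG_FLAG_DEV_DATA_STATE) {
        return -EINVAL;
    }

    uint64_t data_size = get_be64();
    if (data_size) {
        skip_bytes(data_size);      /* opaque device data */
    }

    return get_be64() == VFIO_MIG_FLAG_END_OF_STATE ? 0 : -EINVAL;
}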