[PATCH v1 2/2] system/cpus: Fix resume_all_vcpus() under vCPU hotplug condition

2024-03-17 Thread Keqian Zhu via
For a vCPU being hotplugged, qemu_init_vcpu() is called. In this
function, we set the vcpu state to stopped, and then wait for the
vcpu thread to be created.

As the vcpu state is stopped, it will inform us that it has been
created and then wait on halt_cond. After we have realized the vcpu
object, we will resume the vcpu thread.

However, while we wait for the vcpu thread to be created, the bql is
unlocked, and another thread is allowed to call resume_all_vcpus(),
which will resume the un-realized vcpu.

Fix the issue by filtering out un-realized vcpus during
resume_all_vcpus().

Signed-off-by: Keqian Zhu 
---
 system/cpus.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/system/cpus.c b/system/cpus.c
index 4e41abe23e..8871f5dfa9 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -638,6 +638,9 @@ void resume_all_vcpus(void)
 
 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
 CPU_FOREACH(cpu) {
+if (!object_property_get_bool(OBJECT(cpu), "realized", &error_abort)) {
+continue;
+}
 cpu_resume(cpu);
 }
 }
-- 
2.33.0




[PATCH v1 1/2] system/cpus: Fix pause_all_vcpus() under concurrent environment

2024-03-17 Thread Keqian Zhu via
Both main loop thread and vCPU thread are allowed to call
pause_all_vcpus(), and in general resume_all_vcpus() is called
after it. Two issues live in pause_all_vcpus():

1. There is a possibility that while thread T1 waits on
qemu_pause_cond with the bql unlocked, another thread has called
pause_all_vcpus() and resume_all_vcpus(); then thread T1 will get
stuck, because the condition all_vcpus_paused() is always false.

2. After all_vcpus_paused() has been checked as true, we will
unlock the bql to relock replay_mutex. While the bql was unlocked,
the vcpu's state may have been changed by another thread, so we
must retry.

Signed-off-by: Keqian Zhu 
---
 system/cpus.c | 29 -
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/system/cpus.c b/system/cpus.c
index 68d161d96b..4e41abe23e 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -571,12 +571,14 @@ static bool all_vcpus_paused(void)
 return true;
 }
 
-void pause_all_vcpus(void)
+static void request_pause_all_vcpus(void)
 {
 CPUState *cpu;
 
-qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
 CPU_FOREACH(cpu) {
+if (cpu->stopped) {
+continue;
+}
 if (qemu_cpu_is_self(cpu)) {
 qemu_cpu_stop(cpu, true);
 } else {
@@ -584,6 +586,14 @@ void pause_all_vcpus(void)
 qemu_cpu_kick(cpu);
 }
 }
+}
+
+void pause_all_vcpus(void)
+{
+qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
+
+retry:
+request_pause_all_vcpus();
 
 /* We need to drop the replay_lock so any vCPU threads woken up
  * can finish their replay tasks
@@ -592,14 +602,23 @@ void pause_all_vcpus(void)
 
 while (!all_vcpus_paused()) {
 qemu_cond_wait(&qemu_pause_cond, &bql);
-CPU_FOREACH(cpu) {
-qemu_cpu_kick(cpu);
-}
+/* During we waited on qemu_pause_cond the bql was unlocked,
+ * the vcpu's state may has been changed by other thread, so
+ * we must request the pause state on all vcpus again.
+ */
+request_pause_all_vcpus();
 }
 
 bql_unlock();
 replay_mutex_lock();
 bql_lock();
+
+/* During the bql was unlocked, the vcpu's state may has been
+ * changed by other thread, so we must retry.
+ */
+if (!all_vcpus_paused()) {
+goto retry;
+}
 }
 
 void cpu_resume(CPUState *cpu)
-- 
2.33.0




[PATCH v1 0/2] Some fixes for pause and resume all vcpus

2024-03-17 Thread Keqian Zhu via
I hit these bugs when testing the RFC patch of the ARM vCPU hotplug feature.
This patch series has been verified to be valid.

Keqian Zhu (2):
  system/cpus: Fix pause_all_vcpus() under concurrent environment
  system/cpus: Fix resume_all_vcpus() under vCPU hotplug condition

 system/cpus.c | 32 +++-
 1 file changed, 27 insertions(+), 5 deletions(-)

-- 
2.33.0




[PATCH] virtio-gpu: Optimize 2D resource data transfer

2023-06-11 Thread Keqian Zhu via
The following points can sometimes reduce much of the data
to copy:
1. When the width matches, we can transfer the data with one
call of iov_to_buf().
2. Only the required height needs to be transferred, not the
whole image.

Signed-off-by: Keqian Zhu 
---
 hw/display/virtio-gpu.c | 22 +++---
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 66cddd94d9..af31018ab0 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -438,11 +438,11 @@ static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
 {
 struct virtio_gpu_simple_resource *res;
-int h;
+int h, bpp;
 uint32_t src_offset, dst_offset, stride;
-int bpp;
 pixman_format_code_t format;
 struct virtio_gpu_transfer_to_host_2d t2d;
+void *img_data;
 
 VIRTIO_GPU_FILL_CMD(t2d);
 virtio_gpu_t2d_bswap(&t2d);
@@ -471,23 +471,23 @@ static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
 format = pixman_image_get_format(res->image);
 bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
 stride = pixman_image_get_stride(res->image);
+img_data = pixman_image_get_data(res->image);
 
-if (t2d.offset || t2d.r.x || t2d.r.y ||
-t2d.r.width != pixman_image_get_width(res->image)) {
-void *img_data = pixman_image_get_data(res->image);
+if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
 for (h = 0; h < t2d.r.height; h++) {
 src_offset = t2d.offset + stride * h;
 dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
 
 iov_to_buf(res->iov, res->iov_cnt, src_offset,
-   (uint8_t *)img_data
-   + dst_offset, t2d.r.width * bpp);
+   (uint8_t *)img_data + dst_offset,
+   t2d.r.width * bpp);
 }
 } else {
-iov_to_buf(res->iov, res->iov_cnt, 0,
-   pixman_image_get_data(res->image),
-   pixman_image_get_stride(res->image)
-   * pixman_image_get_height(res->image));
+src_offset = t2d.offset;
+dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
+iov_to_buf(res->iov, res->iov_cnt, src_offset,
+   (uint8_t *)img_data + dst_offset,
+   stride * t2d.r.height);
 }
 }
 
-- 
2.20.1




[PATCH v2] hw/acpi: Add ospm_status hook implementation for acpi-ged

2022-08-16 Thread Keqian Zhu via
Setting up an ARM virtual machine of machine type virt and executing the qmp
command "query-acpi-ospm-status"
causes a segmentation fault with the following dumpstack:
 #1  0xab64235c in qmp_query_acpi_ospm_status 
(errp=errp@entry=0xf030) at ../monitor/qmp-cmds.c:312
 #2  0xabfc4e20 in qmp_marshal_query_acpi_ospm_status (args=, ret=0xea4ffe90, errp=0xea4ffe88) at qapi/qapi-commands-acpi.c:63
 #3  0xabff8ba0 in do_qmp_dispatch_bh (opaque=0xea4ffe98) at 
../qapi/qmp-dispatch.c:128
 #4  0xac02e594 in aio_bh_call (bh=0xe0004d80) at 
../util/async.c:150
 #5  aio_bh_poll (ctx=ctx@entry=0xad0f6040) at ../util/async.c:178
 #6  0xac00bd40 in aio_dispatch (ctx=ctx@entry=0xad0f6040) at 
../util/aio-posix.c:421
 #7  0xac02e010 in aio_ctx_dispatch (source=0xad0f6040, 
callback=, user_data=) at ../util/async.c:320
 #8  0xf76f6884 in g_main_context_dispatch () at 
/usr/lib64/libglib-2.0.so.0
 #9  0xac0452d4 in glib_pollfds_poll () at ../util/main-loop.c:297
 #10 os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:320
 #11 main_loop_wait (nonblocking=nonblocking@entry=0) at ../util/main-loop.c:596
 #12 0xab5c9e50 in qemu_main_loop () at ../softmmu/runstate.c:734
 #13 0xab185370 in qemu_main (argc=argc@entry=47, 
argv=argv@entry=0xf518, envp=envp@entry=0x0) at ../softmmu/main.c:38
 #14 0xab16f99c in main (argc=47, argv=0xf518) at 
../softmmu/main.c:47

Fixes: ebb62075021a ("hw/acpi: Add ACPI Generic Event Device Support")
Signed-off-by: Keqian Zhu 
---
 hw/acpi/generic_event_device.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c
index e28457a7d1..a3d31631fe 100644
--- a/hw/acpi/generic_event_device.c
+++ b/hw/acpi/generic_event_device.c
@@ -267,6 +267,13 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev,
 }
 }
 
+static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
+{
+AcpiGedState *s = ACPI_GED(adev);
+
+acpi_memory_ospm_status(&s->memhp_state, list);
+}
+
 static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
 {
 AcpiGedState *s = ACPI_GED(adev);
@@ -409,6 +416,7 @@ static void acpi_ged_class_init(ObjectClass *class, void 
*data)
 hc->unplug_request = acpi_ged_unplug_request_cb;
 hc->unplug = acpi_ged_unplug_cb;
 
+adevc->ospm_status = acpi_ged_ospm_status;
 adevc->send_event = acpi_ged_send_event;
 }
 
-- 
2.33.0




[PATCH] acpi_ged: Add ospm_status hook implementation

2022-08-16 Thread Keqian Zhu via
This fixes a bug that causes a segmentation fault with the following dumpstack:
 #1  0xab64235c in qmp_query_acpi_ospm_status 
(errp=errp@entry=0xf030) at ../monitor/qmp-cmds.c:312
 #2  0xabfc4e20 in qmp_marshal_query_acpi_ospm_status (args=, ret=0xea4ffe90, errp=0xea4ffe88) at qapi/qapi-commands-acpi.c:63
 #3  0xabff8ba0 in do_qmp_dispatch_bh (opaque=0xea4ffe98) at 
../qapi/qmp-dispatch.c:128
 #4  0xac02e594 in aio_bh_call (bh=0xe0004d80) at 
../util/async.c:150
 #5  aio_bh_poll (ctx=ctx@entry=0xad0f6040) at ../util/async.c:178
 #6  0xac00bd40 in aio_dispatch (ctx=ctx@entry=0xad0f6040) at 
../util/aio-posix.c:421
 #7  0xac02e010 in aio_ctx_dispatch (source=0xad0f6040, 
callback=, user_data=) at ../util/async.c:320
 #8  0xf76f6884 in g_main_context_dispatch () at 
/usr/lib64/libglib-2.0.so.0
 #9  0xac0452d4 in glib_pollfds_poll () at ../util/main-loop.c:297
 #10 os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:320
 #11 main_loop_wait (nonblocking=nonblocking@entry=0) at ../util/main-loop.c:596
 #12 0xab5c9e50 in qemu_main_loop () at ../softmmu/runstate.c:734
 #13 0xab185370 in qemu_main (argc=argc@entry=47, 
argv=argv@entry=0xf518, envp=envp@entry=0x0) at ../softmmu/main.c:38
 #14 0xab16f99c in main (argc=47, argv=0xf518) at 
../softmmu/main.c:47

Fixes: ebb62075021a ("hw/acpi: Add ACPI Generic Event Device Support")
Signed-off-by: Keqian Zhu 
---
 hw/acpi/generic_event_device.c | 8 
 1 file changed, 8 insertions(+)

diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c
index e28457a7d1..a3d31631fe 100644
--- a/hw/acpi/generic_event_device.c
+++ b/hw/acpi/generic_event_device.c
@@ -267,6 +267,13 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev,
 }
 }
 
+static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
+{
+AcpiGedState *s = ACPI_GED(adev);
+
+acpi_memory_ospm_status(&s->memhp_state, list);
+}
+
 static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
 {
 AcpiGedState *s = ACPI_GED(adev);
@@ -409,6 +416,7 @@ static void acpi_ged_class_init(ObjectClass *class, void 
*data)
 hc->unplug_request = acpi_ged_unplug_request_cb;
 hc->unplug = acpi_ged_unplug_cb;
 
+adevc->ospm_status = acpi_ged_ospm_status;
 adevc->send_event = acpi_ged_send_event;
 }
 
-- 
2.33.0