Code using the old bh APIs should be updated to state whether the bh
relates to the target machine model or to host QEMU operation, so
that record/replay works properly.
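
For example, a host-side caller of the oneshot API is converted like
this (see the monitor/monitor.c hunk below); machine-model callers
pass a virtual clock type instead:

    aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);

becomes

    aio_bh_schedule_oneshot_event(ctx, monitor_accept_input, mon,
                                  QEMU_CLOCK_REALTIME);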

Add assertions in the old APIs to catch unconverted code when
record/replay is active. Some of the bh APIs, such as cancel and
delete, do not appear to be implemented in the replay-event driver
either, so try to catch those being used as well.
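
Concretely, the unconverted scheduling entry point now refuses to run
under record/replay (see the util/async.c hunk below):

    void qemu_bh_schedule(QEMUBH *bh)
    {
        /* Callers should be converted to use qemu_bh_schedule_event */
        assert(replay_mode == REPLAY_MODE_NONE);
        aio_bh_enqueue(bh, BH_SCHEDULED);
    }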

This fixes one bug in the IDE code that was caught by the assertions.
That in turn fixes the x86-64 q35 non-virtio record/replay avocado
test for me, so the change is not entirely academic.
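
The IDE restart bh is machine-model work, so it now goes through the
new API with the virtual clock, letting record/replay handle it as a
machine-model event (see the hw/ide/core.c hunk below):

    qemu_bh_schedule(bus->bh);

becomes

    qemu_bh_schedule_event(bus->bh, QEMU_CLOCK_VIRTUAL);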
---
 include/block/aio.h                |  2 +-
 block.c                            |  4 +++-
 hw/ide/core.c                      |  2 +-
 hw/scsi/scsi-bus.c                 |  6 +++---
 monitor/monitor.c                  |  2 +-
 monitor/qmp.c                      |  5 +++--
 qapi/qmp-dispatch.c                |  4 ++--
 replay/replay-events.c             | 25 +++++++++++--------------
 util/aio-wait.c                    |  2 +-
 util/async.c                       | 23 ++++++++++++++++++++++-
 util/main-loop.c                   |  2 +-
 util/thread-pool.c                 |  8 ++++----
 scripts/block-coroutine-wrapper.py |  2 +-
 13 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index 23c5543506..b877c6070d 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -308,7 +308,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  */
 #define aio_bh_schedule_oneshot(ctx, cb, opaque) \
     aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \
-                                 QEMU_CLOCK_REALTIME)
+                                 QEMU_CLOCK_MAX)
 
 /**
  * aio_bh_new_full: Allocate a new bottom half structure.
diff --git a/block.c b/block.c
index c317de9eaa..67c88e8c68 100644
--- a/block.c
+++ b/block.c
@@ -7144,7 +7144,9 @@ void bdrv_schedule_unref(BlockDriverState *bs)
     if (!bs) {
         return;
     }
-    aio_bh_schedule_oneshot(qemu_get_aio_context(), bdrv_schedule_unref_bh, bs);
+    aio_bh_schedule_oneshot_event(qemu_get_aio_context(),
+                                  bdrv_schedule_unref_bh, bs,
+                                  QEMU_CLOCK_REALTIME);
 }
 
 struct BdrvOpBlocker {
diff --git a/hw/ide/core.c b/hw/ide/core.c
index fa7fee61d9..ee77200f77 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -2780,7 +2780,7 @@ static void ide_restart_cb(void *opaque, bool running, RunState state)
 
     if (!bus->bh) {
         bus->bh = qemu_bh_new(ide_restart_bh, bus);
-        qemu_bh_schedule(bus->bh);
+        qemu_bh_schedule_event(bus->bh, QEMU_CLOCK_VIRTUAL);
     }
 }
 
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 53eff5dd3d..ef36f12031 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -166,9 +166,9 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
 
     /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
     blk_inc_in_flight(s->conf.blk);
-    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
-                            scsi_device_for_each_req_async_bh,
-                            data);
+    aio_bh_schedule_oneshot_event(blk_get_aio_context(s->conf.blk),
+                                  scsi_device_for_each_req_async_bh,
+                                  data, QEMU_CLOCK_REALTIME);
 }
 
 static void scsi_device_realize(SCSIDevice *s, Error **errp)
diff --git a/monitor/monitor.c b/monitor/monitor.c
index db52a9c7ef..eae4d4e7f4 100644
--- a/monitor/monitor.c
+++ b/monitor/monitor.c
@@ -580,7 +580,7 @@ void monitor_resume(Monitor *mon)
             ctx = qemu_get_aio_context();
         }
 
-        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
+        aio_bh_schedule_oneshot_event(ctx, monitor_accept_input, mon, QEMU_CLOCK_REALTIME);
     }
 
     trace_monitor_suspend(mon, -1);
diff --git a/monitor/qmp.c b/monitor/qmp.c
index 5e538f34c0..c6fec04860 100644
--- a/monitor/qmp.c
+++ b/monitor/qmp.c
@@ -541,8 +541,9 @@ void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp)
          * since chardev might be running in the monitor I/O
          * thread.  Schedule a bottom half.
          */
-        aio_bh_schedule_oneshot(iothread_get_aio_context(mon_iothread),
-                                monitor_qmp_setup_handlers_bh, mon);
+        aio_bh_schedule_oneshot_event(iothread_get_aio_context(mon_iothread),
+                                      monitor_qmp_setup_handlers_bh, mon,
+                                      QEMU_CLOCK_REALTIME);
         /* The bottom half will add @mon to @mon_list */
     } else {
         qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 176b549473..cd46a0830c 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -254,8 +254,8 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ
             .errp       = &err,
             .co         = qemu_coroutine_self(),
         };
-        aio_bh_schedule_oneshot(iohandler_get_aio_context(), do_qmp_dispatch_bh,
-                                &data);
+        aio_bh_schedule_oneshot_event(iohandler_get_aio_context(), do_qmp_dispatch_bh,
+                                      &data, QEMU_CLOCK_REALTIME);
         qemu_coroutine_yield();
     }
     qobject_unref(args);
diff --git a/replay/replay-events.c b/replay/replay-events.c
index af0721cc1a..5ac974fcac 100644
--- a/replay/replay-events.c
+++ b/replay/replay-events.c
@@ -132,23 +132,17 @@ void replay_add_event(ReplayAsyncEventKind event_kind,
 
 void replay_bh_schedule_event(QEMUBH *bh)
 {
-    if (events_enabled) {
-        uint64_t id = replay_get_current_icount();
-        replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
-    } else {
-        qemu_bh_schedule(bh);
-    }
+    uint64_t id = replay_get_current_icount();
+    assert(events_enabled);
+    replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
 }
 
-void replay_bh_schedule_oneshot_event(AioContext *ctx,
+void replay_bh_oneshot_event(AioContext *ctx,
     QEMUBHFunc *cb, void *opaque)
 {
-    if (events_enabled) {
-        uint64_t id = replay_get_current_icount();
-        replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
-    } else {
-        aio_bh_schedule_oneshot(ctx, cb, opaque);
-    }
+    uint64_t id = replay_get_current_icount();
+    assert(events_enabled);
+    replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
 }
 
 void replay_add_input_event(struct InputEvent *event)
@@ -166,7 +160,10 @@ void replay_block_event(QEMUBH *bh, uint64_t id)
     if (events_enabled) {
         replay_add_event(REPLAY_ASYNC_EVENT_BLOCK, bh, NULL, id);
     } else {
-        qemu_bh_schedule(bh);
+        /*
+         * The block layer can be used before replay events are enabled.
+         */
+        qemu_bh_schedule_event(bh, QEMU_CLOCK_REALTIME);
     }
 }
 
diff --git a/util/aio-wait.c b/util/aio-wait.c
index b5336cf5fd..32298d41b2 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -51,7 +51,7 @@ void aio_wait_kick(void)
     smp_mb();
 
     if (qatomic_read(&global_aio_wait.num_waiters)) {
-        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
+        aio_bh_schedule_oneshot_event(qemu_get_aio_context(), dummy_bh_cb, NULL, QEMU_CLOCK_REALTIME);
     }
 }
 
diff --git a/util/async.c b/util/async.c
index 97ed40048d..6893609d8c 100644
--- a/util/async.c
+++ b/util/async.c
@@ -57,6 +57,9 @@ enum {
 
     /* Schedule periodically when the event loop is idle */
     BH_IDLE      = (1 << 4),
+
+    /* BH being handled by replay machinery */
+    BH_REPLAY    = (1 << 5),
 };
 
 struct QEMUBH {
@@ -144,6 +147,10 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                   void *opaque, const char *name,
                                   QEMUClockType clock_type)
 {
+    if (clock_type == QEMU_CLOCK_MAX) {
+        assert(replay_mode == REPLAY_MODE_NONE);
+        clock_type = QEMU_CLOCK_REALTIME;
+    }
     switch (clock_type) {
     case QEMU_CLOCK_VIRTUAL:
     case QEMU_CLOCK_VIRTUAL_RT:
@@ -177,6 +184,12 @@ void aio_bh_call(QEMUBH *bh)
 {
     bool last_engaged_in_io = false;
 
+    if (bh->flags & BH_REPLAY) {
+        assert(!(bh->flags & BH_SCHEDULED));
+        assert(!(bh->flags & BH_DELETED));
+        assert(!(bh->flags & BH_PENDING));
+        bh->flags &= ~BH_REPLAY;
+    }
     /* Make a copy of the guard-pointer as cb may free the bh */
     MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
     if (reentrancy_guard) {
@@ -263,11 +276,15 @@ void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type)
 
 void qemu_bh_schedule_idle(QEMUBH *bh)
 {
+    /* No mechanism for scheduling an idle bh through replay at the moment */
+    assert(replay_mode == REPLAY_MODE_NONE);
     aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
+    /* Callers should be converted to use qemu_bh_schedule_event */
+    assert(replay_mode == REPLAY_MODE_NONE);
     aio_bh_enqueue(bh, BH_SCHEDULED);
 }
 
@@ -275,6 +292,8 @@ void qemu_bh_schedule(QEMUBH *bh)
  */
 void qemu_bh_cancel(QEMUBH *bh)
 {
+    /* No mechanism for canceling replay-scheduled bh at the moment */
+    assert(!(bh->flags & BH_REPLAY));
     qatomic_and(&bh->flags, ~BH_SCHEDULED);
 }
 
@@ -283,6 +302,8 @@ void qemu_bh_cancel(QEMUBH *bh)
  */
 void qemu_bh_delete(QEMUBH *bh)
 {
+    /* No mechanism for deleting replay-scheduled bh at the moment */
+    assert(!(bh->flags & BH_REPLAY));
     aio_bh_enqueue(bh, BH_DELETED);
 }
 
@@ -683,7 +704,7 @@ void aio_co_schedule(AioContext *ctx, Coroutine *co)
 
     QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                               co, co_scheduled_next);
-    qemu_bh_schedule(ctx->co_schedule_bh);
+    qemu_bh_schedule_event(ctx->co_schedule_bh, QEMU_CLOCK_REALTIME);
 
     aio_context_unref(ctx);
 }
diff --git a/util/main-loop.c b/util/main-loop.c
index a0386cfeb6..6180a183f5 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -148,7 +148,7 @@ void qemu_notify_event(void)
     if (!qemu_aio_context) {
         return;
     }
-    qemu_bh_schedule(qemu_notify_bh);
+    qemu_bh_schedule_event(qemu_notify_bh, QEMU_CLOCK_REALTIME);
 }
 
 static GArray *gpollfds;
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 27eb777e85..010eb4ad9a 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -115,7 +115,7 @@ static void *worker_thread(void *opaque)
         smp_wmb();
         req->state = THREAD_DONE;
 
-        qemu_bh_schedule(pool->completion_bh);
+        qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
         qemu_mutex_lock(&pool->lock);
     }
 
@@ -167,7 +167,7 @@ static void spawn_thread(ThreadPool *pool)
      * inherit the correct affinity instead of the vcpu affinity.
      */
     if (!pool->pending_threads) {
-        qemu_bh_schedule(pool->new_thread_bh);
+        qemu_bh_schedule_event(pool->new_thread_bh, QEMU_CLOCK_REALTIME);
     }
 }
 
@@ -195,7 +195,7 @@ restart:
             /* Schedule ourselves in case elem->common.cb() calls aio_poll() to
              * wait for another request that completed at the same time.
              */
-            qemu_bh_schedule(pool->completion_bh);
+            qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
 
             elem->common.cb(elem->common.opaque, elem->ret);
 
@@ -225,7 +225,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
     QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED) {
         QTAILQ_REMOVE(&pool->request_list, elem, reqs);
-        qemu_bh_schedule(pool->completion_bh);
+        qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
 
         elem->state = THREAD_DONE;
         elem->ret = -ECANCELED;
diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py
index dbbde99e39..f28e7b9200 100644
--- a/scripts/block-coroutine-wrapper.py
+++ b/scripts/block-coroutine-wrapper.py
@@ -292,7 +292,7 @@ def gen_no_co_wrapper(func: FuncDecl) -> str:
     }};
     assert(qemu_in_coroutine());
 
-    aio_bh_schedule_oneshot(qemu_get_aio_context(), {name}_bh, &s);
+    aio_bh_schedule_oneshot_event(qemu_get_aio_context(), {name}_bh, &s, QEMU_CLOCK_REALTIME);
     qemu_coroutine_yield();
 
     {func.ret}
-- 
2.45.2

