Introduce event-tap callbacks in the functions that actually fire outputs
at the net and block layers.  By synchronizing the VMs before outputs are
fired, we can fail over to the receiver upon failure.
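
As an illustration only, here is a minimal sketch of how a fault-tolerance
service might hook these taps.  The ft_* names and ft_sync_standby() are
hypothetical placeholders, not part of this patch; the real callback would
perform whatever synchronization checkpoints the standby VM.

    #include "block.h"
    #include "net/queue.h"

    /* Placeholder for the actual VM synchronization. */
    extern int ft_sync_standby(void);

    /* Called from the block/net layers right before an output is fired. */
    static int ft_event_tap(void)
    {
        return ft_sync_standby();
    }

    static void ft_start(void)
    {
        /* Register the same callback for both output paths. */
        bdrv_event_tap_register(ft_event_tap);
        qemu_net_event_tap_register(ft_event_tap);
    }

    static void ft_stop(void)
    {
        bdrv_event_tap_unregister();
        qemu_net_event_tap_unregister();
    }

Since the register functions only install a callback when none is set, the
first caller wins; a later register is a no-op until the tap is unregistered.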

Signed-off-by: Yoshiaki Tamura <tamura.yoshi...@lab.ntt.co.jp>
---
 block.c     |   22 ++++++++++++++++++++++
 block.h     |    4 ++++
 net/queue.c |   20 ++++++++++++++++++++
 net/queue.h |    3 +++
 4 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/block.c b/block.c
index 31d1ba4..cf73c47 100644
--- a/block.c
+++ b/block.c
@@ -59,6 +59,8 @@ BlockDriverState *bdrv_first;
 
 static BlockDriver *first_drv;
 
+static int (*bdrv_event_tap)(void);
+
 /* If non-zero, use only whitelisted block drivers */
 static int use_bdrv_whitelist;
 
@@ -787,6 +789,10 @@ int bdrv_write(BlockDriverState *bs, int64_t sector_num,
         set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
     }
 
+    if (bdrv_event_tap != NULL) {
+        bdrv_event_tap();
+    }
+
     return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
 }
 
@@ -1851,6 +1857,10 @@ int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
     MultiwriteCB *mcb;
     int i;
 
+    if (bdrv_event_tap != NULL) {
+        bdrv_event_tap();
+    }
+
     if (num_reqs == 0) {
         return 0;
     }
@@ -2277,3 +2287,15 @@ int64_t bdrv_get_dirty_count(BlockDriverState *bs)
 {
     return bs->dirty_count;
 }
+
+void bdrv_event_tap_register(int (*cb)(void))
+{
+    if (bdrv_event_tap == NULL) {
+        bdrv_event_tap = cb;
+    }
+}
+
+void bdrv_event_tap_unregister(void)
+{
+    bdrv_event_tap = NULL;
+}
diff --git a/block.h b/block.h
index edf5704..b5139db 100644
--- a/block.h
+++ b/block.h
@@ -207,4 +207,8 @@ int bdrv_get_dirty(BlockDriverState *bs, int64_t sector);
 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                       int nr_sectors);
 int64_t bdrv_get_dirty_count(BlockDriverState *bs);
+
+void bdrv_event_tap_register(int (*cb)(void));
+void bdrv_event_tap_unregister(void);
+
 #endif
diff --git a/net/queue.c b/net/queue.c
index 2ea6cd0..a542efe 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -57,6 +57,8 @@ struct NetQueue {
     unsigned delivering : 1;
 };
 
+static int (*net_event_tap)(void);
+
 NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver,
                              NetPacketDeliverIOV *deliver_iov,
                              void *opaque)
@@ -151,6 +153,9 @@ static ssize_t qemu_net_queue_deliver(NetQueue *queue,
     ssize_t ret = -1;
 
     queue->delivering = 1;
+    if (net_event_tap != NULL) {
+        net_event_tap();
+    }
     ret = queue->deliver(sender, flags, data, size, queue->opaque);
     queue->delivering = 0;
 
@@ -166,6 +171,9 @@ static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
     ssize_t ret = -1;
 
     queue->delivering = 1;
+    if (net_event_tap != NULL) {
+        net_event_tap();
+    }
     ret = queue->deliver_iov(sender, flags, iov, iovcnt, queue->opaque);
     queue->delivering = 0;
 
@@ -258,3 +266,15 @@ void qemu_net_queue_flush(NetQueue *queue)
         qemu_free(packet);
     }
 }
+
+void qemu_net_event_tap_register(int (*cb)(void))
+{
+    if (net_event_tap == NULL) {
+        net_event_tap = cb;
+    }
+}
+
+void qemu_net_event_tap_unregister(void)
+{
+    net_event_tap = NULL;
+}
diff --git a/net/queue.h b/net/queue.h
index a31958e..5b031c1 100644
--- a/net/queue.h
+++ b/net/queue.h
@@ -68,4 +68,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
 void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from);
 void qemu_net_queue_flush(NetQueue *queue);
 
+void qemu_net_event_tap_register(int (*cb)(void));
+void qemu_net_event_tap_unregister(void);
+
 #endif /* QEMU_NET_QUEUE_H */
-- 
1.7.0.31.g1df487

