QEMU is rather aggressive about exhausting the select() wait period: after a
successful select(), it keeps re-selecting until the timeout runs out.  This
is fine when the wait period is short and there are significant delays
in-between selects, as it improves IO throughput.

With the IO thread, the delay between selects is very small and our select
wait period is very large.  This patch changes main_loop_wait to select only
once before doing the various other things in the main loop.  This generally
improves the responsiveness of things like SDL, but it also improves
individual file descriptor throughput quite dramatically.
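
To illustrate the resulting control flow, here is a minimal, self-contained
sketch (not the QEMU code; poll_stdin() and the single watched fd are
hypothetical stand-ins for the registered IO handlers):

/* Sketch of a select-once main loop, assuming a single handler on fd 0. */
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

/* Hypothetical handler: drain whatever is currently readable on fd 0. */
static void poll_stdin(void)
{
    char buf[256];
    ssize_t n = read(0, buf, sizeof(buf));
    if (n > 0)
        fwrite(buf, 1, n, stdout);
}

int main(void)
{
    for (;;) {
        fd_set rfds;
        struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };

        FD_ZERO(&rfds);
        FD_SET(0, &rfds);

        /* Old behaviour (roughly): on activity, jump back and re-select
         * until the timeout is exhausted before continuing.  New behaviour,
         * shown here: select exactly once, service what is ready, then let
         * the rest of the main loop run; leftover data is picked up on the
         * next iteration. */
        if (select(1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(0, &rfds))
            poll_stdin();

        /* ... other main-loop work (timers, display refresh, etc.) ... */
    }
}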

This patch relies on my io-thread-timerfd.patch.

Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>

diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
index 0c7f49f..31c7ca7 100644
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -401,24 +401,6 @@ void qemu_kvm_notify_work(void)
         pthread_kill(io_thread, SIGUSR1);
 }
 
-static int received_signal;
-
-/* QEMU relies on periodically breaking out of select via EINTR to poll for IO
-   and timer signals.  Since we're now using a file descriptor to handle
-   signals, select() won't be interrupted by a signal.  We need to forcefully
-   break the select() loop when a signal is received hence
-   kvm_check_received_signal(). */
-
-int kvm_check_received_signal(void)
-{
-    if (received_signal) {
-       received_signal = 0;
-       return 1;
-    }
-
-    return 0;
-}
-
 #if defined(SYS_signalfd)
 #if !defined(HAVE_signalfd)
 #include <linux/signalfd.h>
@@ -466,8 +448,6 @@ static void sigfd_handler(void *opaque)
        if (info.ssi_signo == SIGUSR2)
            pthread_cond_signal(&qemu_aio_cond); 
     }
-
-    received_signal = 1;
 }
 
 static int setup_signal_handlers(int nr_signals, ...)
@@ -576,8 +556,6 @@ static void sigfd_handler(void *opaque)
        if (signo == SIGUSR2)
            pthread_cond_signal(&qemu_aio_cond); 
     }
-
-    received_signal = 1;
 }
 
 static int setup_signal_handlers(int nr_signals, ...)
diff --git a/qemu/qemu-kvm.h b/qemu/qemu-kvm.h
index bcab82c..5109c64 100644
--- a/qemu/qemu-kvm.h
+++ b/qemu/qemu-kvm.h
@@ -112,13 +112,4 @@ static inline void kvm_sleep_end(void)
        kvm_mutex_lock();
 }
 
-int kvm_check_received_signal(void);
-
-static inline int kvm_received_signal(void)
-{
-    if (kvm_enabled())
-       return kvm_check_received_signal();
-    return 0;
-}
-
 #endif
diff --git a/qemu/vl.c b/qemu/vl.c
index 1192759..bcf893f 100644
--- a/qemu/vl.c
+++ b/qemu/vl.c
@@ -7936,23 +7936,18 @@ void main_loop_wait(int timeout)
         slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
     }
 #endif
- moreio:
     ret = qemu_select(nfds + 1, &rfds, &wfds, &xfds, &tv);
     if (ret > 0) {
         IOHandlerRecord **pioh;
-        int more = 0;
 
         for(ioh = first_io_handler; ioh != NULL; ioh = ioh->next) {
             if (!ioh->deleted && ioh->fd_read && FD_ISSET(ioh->fd, &rfds)) {
                 ioh->fd_read(ioh->opaque);
-                if (!ioh->fd_read_poll || ioh->fd_read_poll(ioh->opaque))
-                    more = 1;
-                else
+                if (!(ioh->fd_read_poll && ioh->fd_read_poll(ioh->opaque)))
                     FD_CLR(ioh->fd, &rfds);
             }
             if (!ioh->deleted && ioh->fd_write && FD_ISSET(ioh->fd, &wfds)) {
                 ioh->fd_write(ioh->opaque);
-                more = 1;
             }
         }
 
@@ -7966,8 +7961,6 @@ void main_loop_wait(int timeout)
             } else
                 pioh = &ioh->next;
         }
-        if (more && !kvm_received_signal())
-            goto moreio;
     }
 #if defined(CONFIG_SLIRP)
     if (slirp_inited) {
