Now that QEMU does all the necessary locking itself, get rid of the global
mutex. This allows more than one thread to run inside QEMU simultaneously.
Index: kvm-userspace.io/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.io.orig/qemu/qemu-kvm.c
+++ kvm-userspace.io/qemu/qemu-kvm.c
@@ -145,7 +145,6 @@ static int try_push_interrupts(void *opa
 
 static void post_kvm_run(void *opaque, int vcpu)
 {
-    pthread_mutex_lock(&qemu_mutex);
     kvm_arch_post_kvm_run(opaque, vcpu);
 }
 
@@ -157,7 +156,6 @@ static int pre_kvm_run(void *opaque, int
 
     if (env->interrupt_request & CPU_INTERRUPT_EXIT)
         return 1;
-    pthread_mutex_unlock(&qemu_mutex);
     return 0;
 }
 
@@ -228,7 +226,6 @@ static int kvm_eat_signal(struct qemu_kv
     if (r == -1 && (errno == EAGAIN || errno == EINTR) && !timeout)
         return 0;
     e = errno;
-    pthread_mutex_lock(&qemu_mutex);
     if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
         printf("sigtimedwait: %s\n", strerror(e));
         exit(1);
@@ -241,7 +238,6 @@ static int kvm_eat_signal(struct qemu_kv
         vcpu_info[env->cpu_index].stopped = 1;
         pthread_kill(io_thread, SIGUSR1);
     }
-    pthread_mutex_unlock(&qemu_mutex);
     return ret;
 }
 
@@ -264,9 +260,7 @@ static void kvm_eat_signals(CPUState *en
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
-    pthread_mutex_unlock(&qemu_mutex);
     kvm_eat_signals(env, timeout);
-    pthread_mutex_lock(&qemu_mutex);
     vcpu_info[env->cpu_index].signalled = 0;
 }
 
@@ -339,7 +333,6 @@ static int kvm_main_loop_cpu(CPUState *e
     struct vcpu_info *info = &vcpu_info[env->cpu_index];
 
     setup_kernel_sigmask(env);
-    pthread_mutex_lock(&qemu_mutex);
 
     kvm_qemu_init_env(env);
     env->ready_for_interrupt_injection = 1;
@@ -367,7 +360,6 @@ static int kvm_main_loop_cpu(CPUState *e
             kvm_arch_load_regs(env);
         }
     }
-    pthread_mutex_unlock(&qemu_mutex);
     return 0;
 }
 
@@ -456,12 +448,10 @@ void qemu_kvm_notify_work(void)
 int kvm_main_loop(void)
 {
     io_thread = pthread_self();
-    pthread_mutex_unlock(&qemu_mutex);
     while (1) {
         if (get_cpu_env())
             hw_error("io thread has valid env\n");
         kvm_eat_signal(&io_signal_table, NULL, 1000);
-        pthread_mutex_lock(&qemu_mutex);
         main_loop_wait(0);
         if (qemu_shutdown_requested())
             break;
@@ -471,10 +461,8 @@ int kvm_main_loop(void)
             pthread_kill(vcpu_info[0].thread, SIG_IPI);
             qemu_kvm_reset_requested = 1;
         }
-        pthread_mutex_unlock(&qemu_mutex);
     }
 
-    pthread_mutex_unlock(&qemu_mutex);
     return 0;
 }
 
@@ -614,7 +602,6 @@ int kvm_qemu_init()
     if (!kvm_context) {
         return -1;
     }
-    pthread_mutex_lock(&qemu_mutex);
     return 0;
 }
 
@@ -833,12 +820,13 @@ void qemu_kvm_aio_wait(void)
 
     /* io thread */
     if (!env) {
-        pthread_mutex_unlock(&qemu_mutex);
         kvm_eat_signal(&io_signal_table, NULL, 1000);
-        pthread_mutex_lock(&qemu_mutex);
     /* vcpu thread */
-    } else
+    } else {
+        pthread_mutex_lock(&qemu_mutex);
         pthread_cond_wait(&qemu_aio_cond, &qemu_mutex);
+        pthread_mutex_unlock(&qemu_mutex);
+    }
 }
 
 void qemu_kvm_aio_wait_end(void)
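
One hunk adds locking rather than removing it: qemu_kvm_aio_wait() must now
take qemu_mutex itself, because POSIX requires the mutex passed to
pthread_cond_wait() to be held by the caller; the wait atomically releases it
while sleeping and reacquires it before returning. A self-contained sketch of
that pattern follows (the condition variable, predicate, and function names
are illustrative, not the qemu-kvm API):

#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static int aio_completed;     /* illustrative predicate */

/* Waiter: pthread_cond_wait() must be called with the mutex held; it
 * unlocks it atomically while blocked and relocks it before returning. */
static void wait_for_aio(void)
{
    pthread_mutex_lock(&mutex);
    while (!aio_completed)                /* guard against spurious wakeups */
        pthread_cond_wait(&cond, &mutex);
    pthread_mutex_unlock(&mutex);
}

/* Signaller: update the predicate under the same mutex, then wake waiters. */
static void complete_aio(void)
{
    pthread_mutex_lock(&mutex);
    aio_completed = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mutex);
}

The patch itself waits without a predicate loop, presumably because
qemu_kvm_aio_wait() is already invoked from a retry loop in its caller; the
sketch shows the canonical guarded form.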