Marcelo Tosatti wrote:
> diff --git a/qemu/qemu-kvm-ia64.c b/qemu/qemu-kvm-ia64.c
> index 4d0ddd7..d227d22 100644
> --- a/qemu/qemu-kvm-ia64.c
> +++ b/qemu/qemu-kvm-ia64.c
> @@ -61,3 +61,7 @@ int kvm_arch_try_push_interrupts(void *opaque)
> void kvm_arch_update_regs_for_sipi(CPUState *env)
> {
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> +}
> diff --git a/qemu/qemu-kvm-powerpc.c b/qemu/qemu-kvm-powerpc.c
> index 14ed945..024b18c 100644
> --- a/qemu/qemu-kvm-powerpc.c
> +++ b/qemu/qemu-kvm-powerpc.c
> @@ -213,3 +213,7 @@ int handle_powerpc_dcr_write(int vcpu, uint32_t dcrn,
> uint32_t data)
>
> return 0; /* XXX ignore failed DCR ops */
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> +}
> diff --git a/qemu/qemu-kvm-x86.c b/qemu/qemu-kvm-x86.c
> index 9a771ff..28eb5c2 100644
> --- a/qemu/qemu-kvm-x86.c
> +++ b/qemu/qemu-kvm-x86.c
> @@ -689,3 +689,19 @@ int handle_tpr_access(void *opaque, int vcpu,
> kvm_tpr_access_report(cpu_single_env, rip, is_write);
> return 0;
> }
> +
> +void kvm_arch_cpu_reset(CPUState *env)
> +{
> + struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_UNINITIALIZED
> };
> +
> + kvm_arch_load_regs(env);
> + if (env->cpu_index != 0) {
> + if (kvm_irqchip_in_kernel(kvm_context))
> + kvm_set_mpstate(kvm_context, env->cpu_index, &mp_state);
> + else {
> + env->interrupt_request &= ~CPU_INTERRUPT_HARD;
> + env->hflags |= HF_HALTED_MASK;
> + env->exception_index = EXCP_HLT;
> + }
> + }
> +}
> diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
> index 3cc6d8e..1e1f065 100644
> --- a/qemu/qemu-kvm.c
> +++ b/qemu/qemu-kvm.c
> @@ -31,8 +31,6 @@ kvm_context_t kvm_context;
>
> extern int smp_cpus;
>
> -static int qemu_kvm_reset_requested;
> -
> pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
> pthread_cond_t qemu_aio_cond = PTHREAD_COND_INITIALIZER;
> pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
> @@ -52,7 +50,6 @@ struct vcpu_info {
> int signalled;
> int stop;
> int stopped;
> - int reload_regs;
> int created;
> } vcpu_info[256];
>
> @@ -242,21 +239,29 @@ static void pause_all_threads(void)
> {
> int i;
>
> + if (cpu_single_env) {
> + fprintf(stderr, "qemu-kvm: pause_all_threads from vcpu context\n");
> + exit(0);
> + }
> +
> for (i = 0; i < smp_cpus; ++i) {
> vcpu_info[i].stop = 1;
> pthread_kill(vcpu_info[i].thread, SIG_IPI);
> }
> - while (!all_threads_paused()) {
> - CPUState *env = cpu_single_env;
> + while (!all_threads_paused())
> pthread_cond_wait(&qemu_pause_cond, &qemu_mutex);
> - cpu_single_env = env;
> - }
> + cpu_single_env = NULL;
> }
>
Personally, I prefer it the old way. All of the open-coded
cpu_single_env manipulations are tough to understand and, I believe, error-prone. I
think a strategy of explicitly preserving cpu_single_env whenever we
drop qemu_mutex is more robust. Explicitly setting cpu_single_env =
NULL happens to work only because this function is called exclusively from the I/O thread.
It's less clear to a casual reader why it's necessary.
In fact, I'd be much more inclined to see a wrapper around
pthread_cond_wait() so that we never have to set cpu_single_env explicitly.
Regards,
Anthony Liguori
> }
>
> void qemu_system_shutdown_request(void)
>
> -------------------------------------------------------------------------
> This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
> Don't miss this year's exciting event. There's still time to save $100.
> Use priority code J8TL2D2.
> http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
> _______________________________________________
> kvm-devel mailing list
> [email protected]
> https://lists.sourceforge.net/lists/listinfo/kvm-devel
>
-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference
Don't miss this year's exciting event. There's still time to save $100.
Use priority code J8TL2D2.
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
_______________________________________________
kvm-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/kvm-devel