On Sun, Dec 23, 2007 at 07:37:40PM +0200, Avi Kivity wrote:
> The sched_in notifier needs to enable interrupts (but it must disable
> preemption to avoid recursion).
OK — this update fixes the smp_call_function deadlock.
Signed-off-by: Andrea Arcangeli <[EMAIL PROTECTED]>
diff --git a/kernel/hack-module.awk b/kernel/hack-module.awk
index 7993aa2..5187c96 100644
--- a/kernel/hack-module.awk
+++ b/kernel/hack-module.awk
@@ -24,32 +24,6 @@
printf("MODULE_INFO(version, \"%s\");\n", version)
}
-/^static unsigned long vmcs_readl/ {
- in_vmcs_read = 1
-}
-
-/ASM_VMX_VMREAD_RDX_RAX/ && in_vmcs_read {
- printf("\tstart_special_insn();\n")
-}
-
-/return/ && in_vmcs_read {
- printf("\tend_special_insn();\n");
- in_vmcs_read = 0
-}
-
-/^static void vmcs_writel/ {
- in_vmcs_write = 1
-}
-
-/ASM_VMX_VMWRITE_RAX_RDX/ && in_vmcs_write {
- printf("\tstart_special_insn();\n")
-}
-
-/if/ && in_vmcs_write {
- printf("\tend_special_insn();\n");
- in_vmcs_write = 0
-}
-
/^static void vmx_load_host_state/ {
vmx_load_host_state = 1
}
@@ -74,15 +48,6 @@
print "\tspecial_reload_dr7();"
}
-/static void vcpu_put|static int __vcpu_run|static struct kvm_vcpu \*vmx_create_vcpu/ {
- in_tricky_func = 1
-}
-
-/preempt_disable|get_cpu/ && in_tricky_func {
- printf("\tin_special_section();\n");
- in_tricky_func = 0
-}
-
/unsigned long flags;/ && vmx_load_host_state {
print "\tunsigned long gsbase;"
}
@@ -90,4 +55,3 @@
/local_irq_save/ && vmx_load_host_state {
print "\t\tgsbase = vmcs_readl(HOST_GS_BASE);"
}
-
diff --git a/kernel/preempt.c b/kernel/preempt.c
index 8bb0405..2582efa 100644
--- a/kernel/preempt.c
+++ b/kernel/preempt.c
@@ -6,8 +6,6 @@
static DEFINE_SPINLOCK(pn_lock);
static LIST_HEAD(pn_list);
-static DEFINE_PER_CPU(int, notifier_enabled);
-static DEFINE_PER_CPU(struct task_struct *, last_tsk);
#define dprintk(fmt) do { \
if (0) \
@@ -15,59 +13,105 @@ static DEFINE_PER_CPU(struct task_struct *, last_tsk);
current->pid, raw_smp_processor_id()); \
} while (0)
-static void preempt_enable_notifiers(void)
+static void preempt_enable_sched_out_notifiers(void)
{
- int cpu = raw_smp_processor_id();
-
- if (per_cpu(notifier_enabled, cpu))
- return;
-
- dprintk("\n");
- per_cpu(notifier_enabled, cpu) = 1;
asm volatile ("mov %0, %%db0" : : "r"(schedule));
- asm volatile ("mov %0, %%db7" : : "r"(0x702ul));
+ asm volatile ("mov %0, %%db7" : : "r"(0x701ul));
+#ifdef CONFIG_X86_64
+ current->thread.debugreg7 = 0ul;
+#else
+ current->thread.debugreg[7] = 0ul;
+#endif
+#ifdef TIF_DEBUG
+ clear_tsk_thread_flag(current, TIF_DEBUG);
+#endif
+}
+
+static void preempt_enable_sched_in_notifiers(void * addr)
+{
+ asm volatile ("mov %0, %%db0" : : "r"(addr));
+ asm volatile ("mov %0, %%db7" : : "r"(0x701ul));
+#ifdef CONFIG_X86_64
+ current->thread.debugreg0 = (unsigned long) addr;
+ current->thread.debugreg7 = 0x701ul;
+#else
+ current->thread.debugreg[0] = (unsigned long) addr;
+ current->thread.debugreg[7] = 0x701ul;
+#endif
+#ifdef TIF_DEBUG
+ set_tsk_thread_flag(current, TIF_DEBUG);
+#endif
}
void special_reload_dr7(void)
{
- asm volatile ("mov %0, %%db7" : : "r"(0x702ul));
+ asm volatile ("mov %0, %%db7" : : "r"(0x701ul));
}
EXPORT_SYMBOL_GPL(special_reload_dr7);
-static void preempt_disable_notifiers(void)
+static void __preempt_disable_notifiers(void)
{
- int cpu = raw_smp_processor_id();
-
- if (!per_cpu(notifier_enabled, cpu))
- return;
+ asm volatile ("mov %0, %%db7" : : "r"(0ul));
+}
- dprintk("\n");
- per_cpu(notifier_enabled, cpu) = 0;
- asm volatile ("mov %0, %%db7" : : "r"(0x400ul));
+static void preempt_disable_notifiers(void)
+{
+ __preempt_disable_notifiers();
+#ifdef CONFIG_X86_64
+ current->thread.debugreg7 = 0ul;
+#else
+ current->thread.debugreg[7] = 0ul;
+#endif
+#ifdef TIF_DEBUG
+ clear_tsk_thread_flag(current, TIF_DEBUG);
+#endif
}
-static void __attribute__((used)) preempt_notifier_trigger(void)
+static void fastcall __attribute__((used)) preempt_notifier_trigger(void ***
ip)
{
struct preempt_notifier *pn;
int cpu = raw_smp_processor_id();
int found = 0;
- unsigned long flags;
dprintk(" - in\n");
//dump_stack();
- spin_lock_irqsave(&pn_lock, flags);
+ spin_lock(&pn_lock);
list_for_each_entry(pn, &pn_list, link)
if (pn->tsk == current) {
found = 1;
break;
}
- spin_unlock_irqrestore(&pn_lock, flags);
- preempt_disable_notifiers();
+ spin_unlock(&pn_lock);
+
if (found) {
- dprintk("sched_out\n");
- pn->ops->sched_out(pn, NULL);
- per_cpu(last_tsk, cpu) = NULL;
- }
+ if ((void *) *ip != schedule) {
+ dprintk("sched_in\n");
+ preempt_enable_sched_out_notifiers();
+
+ preempt_disable();
+ local_irq_enable();
+ pn->ops->sched_in(pn, cpu);
+ local_irq_disable();
+ preempt_enable_no_resched();
+ } else {
+ void * sched_in_addr;
+ dprintk("sched_out\n");
+#ifdef CONFIG_X86_64
+ sched_in_addr = **(ip+3);
+#else
+ /* no special debug stack switch on x86 */
+ sched_in_addr = (void *) *(ip+3);
+#endif
+ preempt_enable_sched_in_notifiers(sched_in_addr);
+
+ preempt_disable();
+ local_irq_enable();
+ pn->ops->sched_out(pn, NULL);
+ local_irq_disable();
+ preempt_enable_no_resched();
+ }
+ } else
+ __preempt_disable_notifiers();
dprintk(" - out\n");
}
@@ -104,6 +148,11 @@ asm ("pn_int1_handler: \n\t"
"pop " TMP " \n\t"
"jz .Lnotme \n\t"
SAVE_REGS "\n\t"
+#ifdef CONFIG_X86_64
+ "leaq 120(%rsp),%rdi\n\t"
+#else
+ "leal 32(%esp),%eax\n\t"
+#endif
"call preempt_notifier_trigger \n\t"
RESTORE_REGS "\n\t"
#ifdef CONFIG_X86_64
@@ -121,75 +170,28 @@ asm ("pn_int1_handler: \n\t"
#endif
);
-void in_special_section(void)
-{
- struct preempt_notifier *pn;
- int cpu = raw_smp_processor_id();
- int found = 0;
- unsigned long flags;
-
- if (per_cpu(last_tsk, cpu) == current)
- return;
-
- dprintk(" - in\n");
- spin_lock_irqsave(&pn_lock, flags);
- list_for_each_entry(pn, &pn_list, link)
- if (pn->tsk == current) {
- found = 1;
- break;
- }
- spin_unlock_irqrestore(&pn_lock, flags);
- if (found) {
- dprintk("\n");
- per_cpu(last_tsk, cpu) = current;
- pn->ops->sched_in(pn, cpu);
- preempt_enable_notifiers();
- }
- dprintk(" - out\n");
-}
-EXPORT_SYMBOL_GPL(in_special_section);
-
-void start_special_insn(void)
-{
- preempt_disable();
- in_special_section();
-}
-EXPORT_SYMBOL_GPL(start_special_insn);
-
-void end_special_insn(void)
-{
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(end_special_insn);
-
void preempt_notifier_register(struct preempt_notifier *notifier)
{
- int cpu = get_cpu();
unsigned long flags;
dprintk(" - in\n");
spin_lock_irqsave(&pn_lock, flags);
- preempt_enable_notifiers();
+ preempt_enable_sched_out_notifiers();
notifier->tsk = current;
list_add(¬ifier->link, &pn_list);
spin_unlock_irqrestore(&pn_lock, flags);
- per_cpu(last_tsk, cpu) = current;
- put_cpu();
dprintk(" - out\n");
}
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
- int cpu = get_cpu();
unsigned long flags;
dprintk(" - in\n");
spin_lock_irqsave(&pn_lock, flags);
list_del(¬ifier->link);
spin_unlock_irqrestore(&pn_lock, flags);
- per_cpu(last_tsk, cpu) = NULL;
preempt_disable_notifiers();
- put_cpu();
dprintk(" - out\n");
}
@@ -238,7 +240,16 @@ void preempt_notifier_sys_init(void)
static void do_disable(void *blah)
{
- preempt_disable_notifiers();
+#ifdef TIF_DEBUG
+ if (!test_tsk_thread_flag(current, TIF_DEBUG))
+#else
+#ifdef CONFIG_X86_64
+ if (!current->thread.debugreg7)
+#else
+ if (!current->thread.debugreg[7])
+#endif
+#endif
+ __preempt_disable_notifiers();
}
void preempt_notifier_sys_exit(void)
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
_______________________________________________
kvm-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/kvm-devel