Module Name:    src
Committed By:   cherry
Date:           Sat Nov 19 17:13:39 UTC 2011

Modified Files:
        src/sys/arch/x86/include: cpu.h
        src/sys/arch/xen/include: hypervisor.h
        src/sys/arch/xen/x86: hypervisor_machdep.c
        src/sys/arch/xen/xen: evtchn.c

Log Message:
[merging from cherry-xenmp] bring in bouyer@'s changes via:
http://mail-index.netbsd.org/source-changes/2011/10/22/msg028271.html
From the Log:
Log Message:
Various interrupt fixes, mainly:
keep a per-cpu mask of enabled events, and use it to get pending events.
A cpu-specific event (all of them at this time) should never be masked
by another CPU, because that may prevent the target CPU from seeing it
(the clock events all fire at once, for example).
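To make the idea concrete, below is a minimal stand-alone sketch (user-space
C, not the committed kernel code) of filtering shared pending-event bits
through a per-CPU enabled-event mask, which is the role ci_evtmask plays in
the diff further down.  The struct and helper names here are illustrative
assumptions, not NetBSD interfaces.

#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
#define NR_EVENTS	256
#define NR_WORDS	(NR_EVENTS / BITS_PER_LONG)

struct cpu_state {
	unsigned long evtmask[NR_WORDS];	/* events this CPU may handle */
};

/* Walk the shared pending bits, but only those enabled for this CPU. */
static void
handle_pending(struct cpu_state *ci, const unsigned long *pending)
{
	size_t w;

	for (w = 0; w < NR_WORDS; w++) {
		unsigned long bits = pending[w] & ci->evtmask[w];

		while (bits != 0) {
			int b = __builtin_ctzl(bits);	/* lowest set bit */
			bits &= bits - 1;		/* clear it */
			printf("cpu handles event %zu\n",
			    w * BITS_PER_LONG + (size_t)b);
		}
	}
}

int
main(void)
{
	/* events 0 and 2 bound to this CPU; events 0, 1, 2 pending */
	struct cpu_state cpu0 = { .evtmask = { 0x5UL } };
	unsigned long pending[NR_WORDS] = { 0x7UL };

	handle_pending(&cpu0, pending);	/* reports events 0 and 2 only */
	return 0;
}

Because each CPU consults only its own mask when collecting pending events,
handling an event on one CPU never requires masking it for everyone, so
another CPU's events (such as its clock event) cannot be hidden from it.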


To generate a diff of this commit:
cvs rdiff -u -r1.42 -r1.43 src/sys/arch/x86/include/cpu.h
cvs rdiff -u -r1.34 -r1.35 src/sys/arch/xen/include/hypervisor.h
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/xen/x86/hypervisor_machdep.c
cvs rdiff -u -r1.55 -r1.56 src/sys/arch/xen/xen/evtchn.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/include/cpu.h
diff -u src/sys/arch/x86/include/cpu.h:1.42 src/sys/arch/x86/include/cpu.h:1.43
--- src/sys/arch/x86/include/cpu.h:1.42	Thu Nov 10 00:12:05 2011
+++ src/sys/arch/x86/include/cpu.h	Sat Nov 19 17:13:39 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.42 2011/11/10 00:12:05 jym Exp $	*/
+/*	$NetBSD: cpu.h,v 1.43 2011/11/19 17:13:39 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -126,6 +126,7 @@ struct cpu_info {
 
 #ifdef XEN
 	struct iplsource  *ci_isources[NIPL];
+	u_long ci_evtmask[NR_EVENT_CHANNELS]; /* events allowed on this CPU */
 #else
 	struct intrsource *ci_isources[MAX_INTR_SOURCES];
 #endif

Index: src/sys/arch/xen/include/hypervisor.h
diff -u src/sys/arch/xen/include/hypervisor.h:1.34 src/sys/arch/xen/include/hypervisor.h:1.35
--- src/sys/arch/xen/include/hypervisor.h:1.34	Sun Nov  6 11:40:47 2011
+++ src/sys/arch/xen/include/hypervisor.h	Sat Nov 19 17:13:39 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor.h,v 1.34 2011/11/06 11:40:47 cherry Exp $	*/
+/*	$NetBSD: hypervisor.h,v 1.35 2011/11/19 17:13:39 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -134,6 +134,7 @@ void do_hypervisor_callback(struct intrf
 void hypervisor_enable_event(unsigned int);
 
 /* hypervisor_machdep.c */
+void hypervisor_send_event(struct cpu_info *, unsigned int);
 void hypervisor_unmask_event(unsigned int);
 void hypervisor_mask_event(unsigned int);
 void hypervisor_clear_event(unsigned int);

Index: src/sys/arch/xen/x86/hypervisor_machdep.c
diff -u src/sys/arch/xen/x86/hypervisor_machdep.c:1.16 src/sys/arch/xen/x86/hypervisor_machdep.c:1.17
--- src/sys/arch/xen/x86/hypervisor_machdep.c:1.16	Tue Sep 20 00:12:24 2011
+++ src/sys/arch/xen/x86/hypervisor_machdep.c	Sat Nov 19 17:13:39 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor_machdep.c,v 1.16 2011/09/20 00:12:24 jym Exp $	*/
+/*	$NetBSD: hypervisor_machdep.c,v 1.17 2011/11/19 17:13:39 cherry Exp $	*/
 
 /*
  *
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.16 2011/09/20 00:12:24 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.17 2011/11/19 17:13:39 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -109,6 +109,7 @@ evt_iterate_bits(struct cpu_info *ci, vo
 		l1 &= ~(1UL << l1i);
 
 		l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
+		l2 &= ci->ci_evtmask[l1i];
 
 		if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
 		xen_atomic_clearbits_l(&pendingl2[l1i], l2);
@@ -140,8 +141,8 @@ evt_set_pending(struct cpu_info *ci, uns
 	int *ret = args;
 
 	if (evtsource[port]) {
-		hypervisor_set_ipending(ci, evtsource[port]->ev_imask,
-		    l1i, l2i);
+		hypervisor_set_ipending(evtsource[port]->ev_cpu,
+		    evtsource[port]->ev_imask, l1i, l2i);
 		evtsource[port]->ev_evcnt.ev_count++;
 		if (*ret == 0 && ci->ci_ilevel <
 		    evtsource[port]->ev_maxlevel)
@@ -281,6 +282,36 @@ do_hypervisor_callback(struct intrframe 
 }
 
 void
+hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
+{
+	KASSERT(ci != NULL);
+
+	volatile shared_info_t *s = HYPERVISOR_shared_info;
+	volatile struct vcpu_info *vci = ci->ci_vcpu;
+
+#ifdef PORT_DEBUG
+	if (ev == PORT_DEBUG)
+		printf("hypervisor_send_event %d\n", ev);
+#endif
+
+	xen_atomic_set_bit(&s->evtchn_pending[0], ev);
+	xen_atomic_set_bit(&vci->evtchn_pending_sel,
+			   ev >> LONG_SHIFT);
+
+	xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
+
+	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
+
+	if (__predict_true(ci == curcpu())) {
+		hypervisor_force_callback();
+	} else {
+		if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
+			panic("xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n", (int) ci->ci_cpuid);
+		}
+	}
+}
+
+void
 hypervisor_unmask_event(unsigned int ev)
 {
 	volatile shared_info_t *s = HYPERVISOR_shared_info;
@@ -375,6 +406,13 @@ hypervisor_set_ipending(struct cpu_info 
 	KASSERT(ci->ci_isources[ipl] != NULL);
 	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
 	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
+	if (__predict_false(ci != curcpu())) {
+		if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
+			panic("hypervisor_set_ipending: "
+			    "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
+			    (int) ci->ci_cpuid);
+		}
+	}
 }
 
 void

Index: src/sys/arch/xen/xen/evtchn.c
diff -u src/sys/arch/xen/xen/evtchn.c:1.55 src/sys/arch/xen/xen/evtchn.c:1.56
--- src/sys/arch/xen/xen/evtchn.c:1.55	Wed Sep 21 15:26:47 2011
+++ src/sys/arch/xen/xen/evtchn.c	Sat Nov 19 17:13:39 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: evtchn.c,v 1.55 2011/09/21 15:26:47 cegger Exp $	*/
+/*	$NetBSD: evtchn.c,v 1.56 2011/11/19 17:13:39 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.55 2011/09/21 15:26:47 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.56 2011/11/19 17:13:39 cherry Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
@@ -192,6 +192,7 @@ events_init(void)
 	 * be called.
 	 */
 	evtsource[debug_port] = (void *)-1;
+	xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port);
 	hypervisor_enable_event(debug_port);
 
 	x86_enable_intr();		/* at long last... */
@@ -269,8 +270,13 @@ evtchn_do_event(int evtch, struct intrfr
 	ci->ci_data.cpu_nintr++;
 	evtsource[evtch]->ev_evcnt.ev_count++;
 	ilevel = ci->ci_ilevel;
-	if (evtsource[evtch]->ev_maxlevel <= ilevel ||
-	    evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) {
+
+	if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) {
+		hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch);
+		return 0;
+	}
+
+	if (evtsource[evtch]->ev_maxlevel <= ilevel) {
 #ifdef IRQ_DEBUG
 		if (evtch == IRQ_DEBUG)
 		    printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
@@ -281,15 +287,8 @@ evtchn_do_event(int evtch, struct intrfr
 					evtch >> LONG_SHIFT,
 					evtch & LONG_MASK);
 
-		if (evtsource[evtch]->ev_cpu != ci) {
-			/* facilitate spllower() on remote cpu */
-			struct cpu_info *rci = evtsource[evtch]->ev_cpu;
-			if (xen_send_ipi(rci, XEN_IPI_KICK) != 0) {
-				panic("xen_send_ipi(%s, XEN_IPI_KICK) failed\n", cpu_name(rci));
-			}
-		}
-
 		/* leave masked */
+
 		return 0;
 	}
 	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
@@ -298,8 +297,16 @@ evtchn_do_event(int evtch, struct intrfr
 	mutex_spin_enter(&evtlock[evtch]);
 	ih = evtsource[evtch]->ev_handlers;
 	while (ih != NULL) {
-		if (ih->ih_level <= ilevel ||
-		   ih->ih_cpu != ci) {
+		if (ih->ih_cpu != ci) {
+			hypervisor_set_ipending(ih->ih_cpu, 1 << ih->ih_level,
+			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
+			iplmask &= ~IUNMASK(ci, ih->ih_level);
+			ih = ih->ih_evt_next;
+			continue;
+		}
+		if (ih->ih_level <= ilevel) {
+			hypervisor_set_ipending(ih->ih_cpu, iplmask,
+			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
 #ifdef IRQ_DEBUG
 		if (evtch == IRQ_DEBUG)
 		    printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
@@ -411,7 +418,6 @@ bind_virq_to_evtch(int virq)
 		return -1;
 	}
 
-	/* Get event channel from VIRQ */
 	if (virq == VIRQ_TIMER) {
 		evtchn = virq_timer_to_evtch[ci->ci_cpuid];
 	} else {
@@ -471,7 +477,11 @@ unbind_virq_from_evtch(int virq)
 		if (HYPERVISOR_event_channel_op(&op) != 0)
 			panic("Failed to unbind virtual IRQ %d\n", virq);
 
-		virq_to_evtch[virq] = -1;
+		if (virq == VIRQ_TIMER) {
+			virq_timer_to_evtch[ci->ci_cpuid] = -1;
+		} else {
+			virq_to_evtch[virq] = -1;
+		}
 	}
 
 	mutex_spin_exit(&evtchn_lock);
@@ -550,7 +560,11 @@ pirq_establish(int pirq, int evtch, int 
 		return NULL;
 	}
 
-	event_set_handler(evtch, pirq_interrupt, ih, level, evname);
+	if (event_set_handler(evtch, pirq_interrupt, ih, level, evname) != 0) {
+		free(ih, M_DEVBUF);
+		return NULL;
+	}
+
 	ih->pirq = pirq;
 	ih->evtch = evtch;
 	ih->func = func;
@@ -593,9 +607,10 @@ pirq_interrupt(void *arg)
  * Recalculate the interrupt from scratch for an event source.
  */
 static void
-intr_calculatemasks(struct evtsource *evts, int evtch)
+intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
 {
 	struct intrhand *ih;
+	int cpu_receive = 0;
 
 #ifdef MULTIPROCESSOR
 	KASSERT(!mutex_owned(&evtlock[evtch]));
@@ -607,7 +622,13 @@ intr_calculatemasks(struct evtsource *ev
 		if (ih->ih_level > evts->ev_maxlevel)
 			evts->ev_maxlevel = ih->ih_level;
 		evts->ev_imask |= (1 << ih->ih_level);
+		if (ih->ih_cpu == ci)
+			cpu_receive = 1;
 	}
+	if (cpu_receive)
+		xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
+	else
+		xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
 	mutex_spin_exit(&evtlock[evtch]);
 }
 
@@ -706,7 +727,7 @@ event_set_handler(int evtch, int (*func)
 		mutex_spin_exit(&evtlock[evtch]);
 	}
 
-	intr_calculatemasks(evts, evtch);
+	intr_calculatemasks(evts, evtch, ci);
 	splx(s);
 
 	return 0;
@@ -743,7 +764,7 @@ event_remove_handler(int evtch, int (*fu
 	struct evtsource *evts;
 	struct intrhand *ih;
 	struct intrhand **ihp;
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 
 	evts = evtsource[evtch];
 	if (evts == NULL)
@@ -760,6 +781,7 @@ event_remove_handler(int evtch, int (*fu
 		mutex_spin_exit(&evtlock[evtch]);
 		return ENOENT;
 	}
+	ci = ih->ih_cpu;
 	*ihp = ih->ih_evt_next;
 	mutex_spin_exit(&evtlock[evtch]);
 
@@ -775,11 +797,12 @@ event_remove_handler(int evtch, int (*fu
 	*ihp = ih->ih_ipl_next;
 	free(ih, M_DEVBUF);
 	if (evts->ev_handlers == NULL) {
+		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
 		evcnt_detach(&evts->ev_evcnt);
 		free(evts, M_DEVBUF);
 		evtsource[evtch] = NULL;
 	} else {
-		intr_calculatemasks(evts, evtch);
+		intr_calculatemasks(evts, evtch, ci);
 	}
 	return 0;
 }
