Module Name:    src
Committed By:   cherry
Date:           Wed Aug 10 21:46:02 UTC 2011

Modified Files:
        src/sys/arch/xen/include: hypervisor.h
        src/sys/arch/xen/x86: hypervisor_machdep.c
        src/sys/arch/xen/xen: evtchn.c

Log Message:
Refactor the bitstring/mask operations to be behind an API. Make pending
interrupt marking CPU-aware.


To generate a diff of this commit:
cvs rdiff -u -r1.31 -r1.32 src/sys/arch/xen/include/hypervisor.h
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/xen/x86/hypervisor_machdep.c
cvs rdiff -u -r1.48 -r1.49 src/sys/arch/xen/xen/evtchn.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/xen/include/hypervisor.h
diff -u src/sys/arch/xen/include/hypervisor.h:1.31 src/sys/arch/xen/include/hypervisor.h:1.32
--- src/sys/arch/xen/include/hypervisor.h:1.31	Mon Oct 19 18:41:10 2009
+++ src/sys/arch/xen/include/hypervisor.h	Wed Aug 10 21:46:02 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor.h,v 1.31 2009/10/19 18:41:10 bouyer Exp $	*/
+/*	$NetBSD: hypervisor.h,v 1.32 2011/08/10 21:46:02 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -91,6 +91,7 @@
 #include <xen/xen3-public/io/netif.h>
 #include <xen/xen3-public/io/blkif.h>
 
+#include <machine/cpu.h>
 #include <machine/hypercalls.h>
 
 #undef u8
@@ -136,7 +137,8 @@
 void hypervisor_mask_event(unsigned int);
 void hypervisor_clear_event(unsigned int);
 void hypervisor_enable_ipl(unsigned int);
-void hypervisor_set_ipending(uint32_t, int, int);
+void hypervisor_set_ipending(struct cpu_info *, 
+			     uint32_t, int, int);
 void hypervisor_machdep_attach(void);
 
 /* 

Index: src/sys/arch/xen/x86/hypervisor_machdep.c
diff -u src/sys/arch/xen/x86/hypervisor_machdep.c:1.14 src/sys/arch/xen/x86/hypervisor_machdep.c:1.15
--- src/sys/arch/xen/x86/hypervisor_machdep.c:1.14	Wed Mar 30 21:53:58 2011
+++ src/sys/arch/xen/x86/hypervisor_machdep.c	Wed Aug 10 21:46:02 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor_machdep.c,v 1.14 2011/03/30 21:53:58 jym Exp $	*/
+/*	$NetBSD: hypervisor_machdep.c,v 1.15 2011/08/10 21:46:02 cherry Exp $	*/
 
 /*
  *
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.14 2011/03/30 21:53:58 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.15 2011/08/10 21:46:02 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -86,13 +86,79 @@
 // #define PORT_DEBUG 4
 // #define EARLY_DEBUG_EVENT
 
+/* callback function type */
+typedef void (*iterate_func_t)(struct cpu_info *, unsigned int,
+			       unsigned int, unsigned int, void *);
+
+static inline void
+evt_iterate_bits(struct cpu_info *ci, volatile unsigned long *pendingl1,
+		 volatile unsigned long *pendingl2, 
+		 volatile unsigned long *mask,
+		 iterate_func_t iterate_pending, void *iterate_args)
+{
+
+	KASSERT(pendingl1 != NULL);
+	KASSERT(pendingl2 != NULL);
+	
+	unsigned long l1, l2;
+	unsigned int l1i, l2i, port;
+
+	l1 = xen_atomic_xchg(pendingl1, 0);
+	while ((l1i = xen_ffs(l1)) != 0) {
+		l1i--;
+		l1 &= ~(1UL << l1i);
+
+		l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
+
+		if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
+		xen_atomic_clearbits_l(&pendingl2[l1i], l2);
+
+		while ((l2i = xen_ffs(l2)) != 0) {
+			l2i--;
+			l2 &= ~(1UL << l2i);
+
+			port = (l1i << LONG_SHIFT) + l2i;
+
+			iterate_pending(ci, port, l1i, l2i, iterate_args);
+		}
+	}
+}
+
+/*
+ * Set per-cpu "pending" information for outstanding events that
+ * cannot be processed now.
+ */
+   
+static inline void
+evt_set_pending(struct cpu_info *ci, unsigned int port, unsigned int l1i,
+		unsigned int l2i, void *args)
+{
+
+	KASSERT(args != NULL);
+	KASSERT(ci != NULL);
+
+	int *ret = args;
+
+	if (evtsource[port]) {
+		hypervisor_set_ipending(ci, evtsource[port]->ev_imask,
+		    l1i, l2i);
+		evtsource[port]->ev_evcnt.ev_count++;
+		if (*ret == 0 && ci->ci_ilevel <
+		    evtsource[port]->ev_maxlevel)
+			*ret = 1;
+	}
+#ifdef DOM0OPS
+	else  {
+		/* set pending event */
+		xenevt_setipending(l1i, l2i);
+	}
+#endif
+}
+
 int stipending(void);
 int
 stipending(void)
 {
-	unsigned long l1;
-	unsigned long l2;
-	unsigned int l1i, l2i, port;
 	volatile shared_info_t *s = HYPERVISOR_shared_info;
 	struct cpu_info *ci;
 	volatile struct vcpu_info *vci;
@@ -120,45 +186,16 @@
 	 * we're only called after STIC, so we know that we'll have to
 	 * STI at the end
 	 */
+
 	while (vci->evtchn_upcall_pending) {
 		cli();
+
 		vci->evtchn_upcall_pending = 0;
-		/* NB. No need for a barrier here -- XCHG is a barrier
-		 * on x86. */
-		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
-		while ((l1i = xen_ffs(l1)) != 0) {
-			l1i--;
-			l1 &= ~(1UL << l1i);
-
-			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
-			/*
-			 * mask and clear event. More efficient than calling
-			 * hypervisor_mask/clear_event for each event.
-			 */
-			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
-			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
-			while ((l2i = xen_ffs(l2)) != 0) {
-				l2i--;
-				l2 &= ~(1UL << l2i);
-
-				port = (l1i << LONG_SHIFT) + l2i;
-				if (evtsource[port]) {
-					hypervisor_set_ipending(
-					    evtsource[port]->ev_imask,
-					    l1i, l2i);
-					evtsource[port]->ev_evcnt.ev_count++;
-					if (ret == 0 && ci->ci_ilevel <
-					    evtsource[port]->ev_maxlevel)
-						ret = 1;
-				}
-#ifdef DOM0OPS
-				else  {
-					/* set pending event */
-					xenevt_setipending(l1i, l2i);
-				}
-#endif
-			}
-		}
+
+		evt_iterate_bits(ci, &vci->evtchn_pending_sel,
+		    s->evtchn_pending, s->evtchn_mask,
+		    evt_set_pending, &ret);
+
 		sti();
 	}
 
@@ -173,12 +210,42 @@
 	return (ret);
 }
 
+/* Iterate through pending events and call the event handler */
+
+static inline void
+evt_do_hypervisor_callback(struct cpu_info *ci, unsigned int port,
+			   unsigned int l1i, unsigned int l2i, void *args)
+{
+	KASSERT(args != NULL);
+	KASSERT(ci == curcpu());
+
+	struct intrframe *regs = args;
+
+#ifdef PORT_DEBUG
+	if (port == PORT_DEBUG)
+		printf("do_hypervisor_callback event %d\n", port);
+#endif
+	if (evtsource[port])
+		call_evtchn_do_event(port, regs);
+#ifdef DOM0OPS
+	else  {
+		if (ci->ci_ilevel < IPL_HIGH) {
+			/* fast path */
+			int oipl = ci->ci_ilevel;
+			ci->ci_ilevel = IPL_HIGH;
+			call_xenevt_event(port);
+			ci->ci_ilevel = oipl;
+		} else {
+			/* set pending event */
+			xenevt_setipending(l1i, l2i);
+		}
+	}
+#endif
+}
+
 void
 do_hypervisor_callback(struct intrframe *regs)
 {
-	unsigned long l1;
-	unsigned long l2;
-	unsigned int l1i, l2i, port;
 	volatile shared_info_t *s = HYPERVISOR_shared_info;
 	struct cpu_info *ci;
 	volatile struct vcpu_info *vci;
@@ -199,51 +266,10 @@
 
 	while (vci->evtchn_upcall_pending) {
 		vci->evtchn_upcall_pending = 0;
-		/* NB. No need for a barrier here -- XCHG is a barrier
-		 * on x86. */
-		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
-		while ((l1i = xen_ffs(l1)) != 0) {
-			l1i--;
-			l1 &= ~(1UL << l1i);
-
-			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
-			/*
-			 * mask and clear the pending events.
-			 * Doing it here for all event that will be processed
-			 * avoids a race with stipending (which can be called
-			 * though evtchn_do_event->splx) that could cause an
-			 * event to be both processed and marked pending.
-			 */
-			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
-			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
-
-			while ((l2i = xen_ffs(l2)) != 0) {
-				l2i--;
-				l2 &= ~(1UL << l2i);
 
-				port = (l1i << LONG_SHIFT) + l2i;
-#ifdef PORT_DEBUG
-				if (port == PORT_DEBUG)
-					printf("do_hypervisor_callback event %d\n", port);
-#endif
-				if (evtsource[port])
-					call_evtchn_do_event(port, regs);
-#ifdef DOM0OPS
-				else  {
-					if (ci->ci_ilevel < IPL_HIGH) {
-						/* fast path */
-						int oipl = ci->ci_ilevel;
-						ci->ci_ilevel = IPL_HIGH;
-						call_xenevt_event(port);
-						ci->ci_ilevel = oipl;
-					} else {
-						/* set pending event */
-						xenevt_setipending(l1i, l2i);
-					}
-				}
-#endif
-			}
-		}
+		evt_iterate_bits(ci, &vci->evtchn_pending_sel,
+		    s->evtchn_pending, s->evtchn_mask,
+		    evt_do_hypervisor_callback, regs);
 	}
 
 #ifdef DIAGNOSTIC
@@ -303,11 +329,18 @@
 	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
 }
 
+static inline void
+evt_enable_event(struct cpu_info *ci, unsigned int port,  
+		 unsigned int l1i, unsigned int l2i, void *args)
+{
+	KASSERT(ci != NULL);
+	KASSERT(args == NULL);
+	hypervisor_enable_event(port);
+}
+
 void
 hypervisor_enable_ipl(unsigned int ipl)
 {
-	u_long l1, l2;
-	int l1i, l2i;
 	struct cpu_info *ci = curcpu();
 
 	/*
@@ -316,30 +349,16 @@
 	 * we know that all callback for this event have been processed.
 	 */
 
-	l1 = ci->ci_isources[ipl]->ipl_evt_mask1;
-	ci->ci_isources[ipl]->ipl_evt_mask1 = 0;
-	while ((l1i = xen_ffs(l1)) != 0) {
-		l1i--;
-		l1 &= ~(1UL << l1i);
-		l2 = ci->ci_isources[ipl]->ipl_evt_mask2[l1i];
-		ci->ci_isources[ipl]->ipl_evt_mask2[l1i] = 0;
-		while ((l2i = xen_ffs(l2)) != 0) {
-			int evtch;
-
-			l2i--;
-			l2 &= ~(1UL << l2i);
+	evt_iterate_bits(ci, &ci->ci_isources[ipl]->ipl_evt_mask1,
+	    ci->ci_isources[ipl]->ipl_evt_mask2, NULL, 
+	    evt_enable_event, NULL);
 
-			evtch = (l1i << LONG_SHIFT) + l2i;
-			hypervisor_enable_event(evtch);
-		}
-	}
 }
 
 void
-hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
+hypervisor_set_ipending(struct cpu_info *ci, uint32_t iplmask, int l1, int l2)
 {
 	int ipl;
-	struct cpu_info *ci = curcpu();
 
 	/* set pending bit for the appropriate IPLs */	
 	ci->ci_ipending |= iplmask;
@@ -352,6 +371,8 @@
 	ipl = ffs(iplmask);
 	KASSERT(ipl > 0);
 	ipl--;
+	KASSERT(ipl < NIPL);
+	KASSERT(ci->ci_isources[ipl] != NULL);
 	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
 	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
 }

Index: src/sys/arch/xen/xen/evtchn.c
diff -u src/sys/arch/xen/xen/evtchn.c:1.48 src/sys/arch/xen/xen/evtchn.c:1.49
--- src/sys/arch/xen/xen/evtchn.c:1.48	Sat Jul  2 19:07:56 2011
+++ src/sys/arch/xen/xen/evtchn.c	Wed Aug 10 21:46:02 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: evtchn.c,v 1.48 2011/07/02 19:07:56 jym Exp $	*/
+/*	$NetBSD: evtchn.c,v 1.49 2011/08/10 21:46:02 cherry Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.48 2011/07/02 19:07:56 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.49 2011/08/10 21:46:02 cherry Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
@@ -223,7 +223,7 @@
 		    printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
 		    evtch, evtsource[evtch]->ev_maxlevel, ilevel);
 #endif
-		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
+		hypervisor_set_ipending(ci, evtsource[evtch]->ev_imask,
 		    evtch >> LONG_SHIFT, evtch & LONG_MASK);
 		/* leave masked */
 		return 0;
@@ -239,7 +239,7 @@
 		    printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
 #endif
 			cli();
-			hypervisor_set_ipending(iplmask,
+			hypervisor_set_ipending(ci, iplmask,
 			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
 			/* leave masked */
 			goto splx;

Reply via email to