This adds support for establishing virtual interrupts via Xen
event channel ports.

During boot, Xen will use polling mode, but once the system
enables interrupts after cpu_configure(), xen_intr_enable()
will be called from the mountroot hook to unmask event ports.

xen_intr() is the combined interrupt handler; it is called
from vector.S or from the [upcoming] PCI device driver.
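
For reference, here is a minimal sketch of how a frontend driver
could consume this API. The xnf_softc structure, its sc_xih member
and the way the port is obtained are made up for illustration; only
the xen_intr_* calls are part of this diff:

	/* Hypothetical interrupt handler for a frontend driver */
	void
	xnf_intr(void *arg)
	{
		struct xnf_softc *sc = arg;	/* assumed driver softc */

		/* process ring responses here */
	}

	int
	xnf_init_intr(struct xnf_softc *sc, evtchn_port_t port)
	{
		/*
		 * Bind a handler to a port obtained e.g. via XenStore;
		 * passing port 0 would allocate a fresh unbound port
		 * instead.
		 */
		if (xen_intr_establish(port, &sc->sc_xih, xnf_intr, sc,
		    sc->sc_dev.dv_xname))
			return (-1);

		/* Notify the other end of the channel */
		xen_intr_signal(sc->sc_xih);
		return (0);
	}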

OK?

---
 sys/dev/pv/xen.c    | 262 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 sys/dev/pv/xenreg.h | 115 +++++++++++++++++++++++
 sys/dev/pv/xenvar.h |  27 ++++++
 3 files changed, 403 insertions(+), 1 deletion(-)

diff --git sys/dev/pv/xen.c sys/dev/pv/xen.c
index 8643636..2fa7283 100644
--- sys/dev/pv/xen.c
+++ sys/dev/pv/xen.c
@@ -39,13 +39,15 @@ void        xen_find_base(struct xen_softc *);
 int    xen_init_hypercall(struct xen_softc *);
 int    xen_getversion(struct xen_softc *);
 int    xen_getfeatures(struct xen_softc *);
 int    xen_init_info_page(struct xen_softc *);
 int    xen_init_cbvec(struct xen_softc *);
+int    xen_init_interrupts(struct xen_softc *);
 
 int    xen_match(struct device *, void *, void *);
 void   xen_attach(struct device *, struct device *, void *);
+void   xen_deferred(void *);
 void   xen_resume(struct device *);
 int    xen_activate(struct device *, int);
 
 struct cfdriver xen_cd = {
        NULL, "xen", DV_DULL
@@ -93,10 +95,29 @@ xen_attach(struct device *parent, struct device *self, void *aux)
 
        if (xen_init_info_page(sc))
                return;
 
        xen_init_cbvec(sc);
+
+       if (xen_init_interrupts(sc))
+               return;
+
+       mountroothook_establish(xen_deferred, sc);
+}
+
+void
+xen_deferred(void *arg)
+{
+       struct xen_softc *sc = arg;
+
+       if (!sc->sc_cbvec) {
+               DPRINTF("%s: callback vector hasn't been established\n",
+                   sc->sc_dev.dv_xname);
+               return;
+       }
+
+       xen_intr_enable();
 }
 
 void
 xen_resume(struct device *self)
 {
@@ -447,10 +468,249 @@ xen_init_cbvec(struct xen_softc *sc)
        sc->sc_cbvec = 1;
 
        return (0);
 }
 
+int
+xen_init_interrupts(struct xen_softc *sc)
+{
+       int i;
+
+       sc->sc_irq = LAPIC_XEN_VECTOR;
+       evcount_attach(&sc->sc_evcnt, sc->sc_dev.dv_xname, &sc->sc_irq);
+
+       /*
+        * Clear all pending events and mask all interrupts
+        */
+       for (i = 0; i < nitems(sc->sc_ipg->evtchn_pending); i++) {
+               sc->sc_ipg->evtchn_pending[i] = 0;
+               sc->sc_ipg->evtchn_mask[i] = ~0UL;
+               membar_producer();
+       }
+
+       SLIST_INIT(&sc->sc_intrs);
+
+       return (0);
+}
+
+static inline struct xen_intsrc *
+xen_lookup_intsrc(struct xen_softc *sc, evtchn_port_t port)
+{
+       struct xen_intsrc *xi;
+
+       SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry)
+               if (xi->xi_port == port)
+                       break;
+       return (xi);
+}
+
+void
+xen_intr_ack(void)
+{
+       struct xen_softc *sc = xen_sc;
+       struct shared_info *s = sc->sc_ipg;
+       struct vcpu_info *v = &s->vcpu_info[curcpu()->ci_cpuid];
+
+       v->evtchn_upcall_pending = 0;
+}
+
 void
 xen_intr(void)
 {
-       /* stub */
+       struct xen_softc *sc = xen_sc;
+       struct xen_intsrc *xi;
+       struct shared_info *s = sc->sc_ipg;
+       struct vcpu_info *v = &s->vcpu_info[curcpu()->ci_cpuid];
+       ulong pending, selector;
+       int port, bit, row;
+
+       sc->sc_evcnt.ec_count++;
+
+       v->evtchn_upcall_pending = 0;
+       selector = atomic_swap_ulong(&v->evtchn_pending_sel, 0);
+
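+       /*
+        * The event bitmap is two-level: each bit set in the selector
+        * word refers to a row of evtchn_pending[], and each unmasked
+        * bit set in a row refers to a pending event channel port.
+        */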
+       for (row = 0; selector > 0; selector >>= 1, row++) {
+               if ((selector & 1) == 0)
+                       continue;
+               pending = sc->sc_ipg->evtchn_pending[row] &
+                   ~(sc->sc_ipg->evtchn_mask[row]);
+               for (bit = 0; pending > 0; pending >>= 1, bit++) {
+                       if ((pending & 1) == 0)
+                               continue;
+                       sc->sc_ipg->evtchn_pending[row] &= ~(1UL << bit);
+                       membar_producer();
+                       port = (row * LONG_BIT) + bit;
+                       if ((xi = xen_lookup_intsrc(sc, port)) == NULL)
+                               continue;
+                       xi->xi_evcnt.ec_count++;
+
+                       if (xi->xi_handler)
+                               xi->xi_handler(xi->xi_arg);
+               }
+       }
+}
+
+void
+xen_intr_signal(xen_intr_handle_t xih)
+{
+       struct xen_softc *sc = xen_sc;
+       struct xen_intsrc *xi;
+       struct evtchn_send es;
+
+       if ((xi = xen_lookup_intsrc(sc, (evtchn_port_t)xih)) != NULL) {
+               es.port = xi->xi_port;
+               xen_hypercall(sc, event_channel_op, 2, EVTCHNOP_send, &es);
+       }
+}
+
+int
+xen_intr_establish(evtchn_port_t port, xen_intr_handle_t *xih,
+    void (*handler)(void *), void *arg, char *name)
+{
+       struct xen_softc *sc = xen_sc;
+       struct xen_intsrc *xi;
+       struct evtchn_alloc_unbound eau;
+       struct evtchn_unmask eu;
+#ifdef notyet
+       struct evtchn_bind_vcpu ebv;
+#endif
+#ifdef XEN_DEBUG
+       struct evtchn_status es;
+#endif
+
+       if (port && xen_lookup_intsrc(sc, port)) {
+               printf("%s: interrupt handler has already been established "
+                   "for port %u\n", sc->sc_dev.dv_xname, port);
+               return (-1);
+       }
+
+       xi = malloc(sizeof(*xi), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (xi == NULL)
+               return (-1);
+
+       xi->xi_handler = handler;
+       xi->xi_arg = arg;
+
+       if (port == 0) {
+               /* We're being asked to allocate a new event port */
+               memset(&eau, 0, sizeof(eau));
+               eau.dom = DOMID_SELF;
+               if (xen_hypercall(sc, event_channel_op, 2,
+                   EVTCHNOP_alloc_unbound, &eau) != 0) {
+                       DPRINTF("%s: failed to allocate new event port\n",
+                           sc->sc_dev.dv_xname);
+                       free(xi, M_DEVBUF, sizeof(*xi));
+                       return (-1);
+               }
+               *xih = xi->xi_port = eau.port;
+       } else {
+               *xih = xi->xi_port = port;
+               /*
+                * The Event Channel API didn't open this port, so it is not
+                * responsible for closing it automatically on unbind.
+                */
+               xi->xi_noclose = 1;
+       }
+
+#ifdef notyet
+       /* Bind interrupt to VCPU#0 */
+       memset(&ebv, 0, sizeof(ebv));
+       ebv.port = xi->xi_port;
+       ebv.vcpu = 0;
+       if (xen_hypercall(sc, event_channel_op, 2, EVTCHNOP_bind_vcpu, &ebv)) {
+               printf("%s: failed to bind interrupt on port %u to vcpu%d\n",
+                   sc->sc_dev.dv_xname, ebv.port, ebv.vcpu);
+       }
+#endif
+
+       evcount_attach(&xi->xi_evcnt, name, &sc->sc_irq);
+
+       SLIST_INSERT_HEAD(&sc->sc_intrs, xi, xi_entry);
+
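+       /*
+        * Unmask the port right away unless the system is still cold;
+        * in that case xen_intr_enable() will unmask it from the
+        * mountroot hook once interrupts are enabled.
+        */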
+       if (!cold) {
+               eu.port = xi->xi_port;
+               if (xen_hypercall(sc, event_channel_op, 2, EVTCHNOP_unmask,
+                   &eu) || isset((char *)sc->sc_ipg->evtchn_mask, xi->xi_port))
+                       printf("%s: unmasking port %u failed\n",
+                           sc->sc_dev.dv_xname, xi->xi_port);
+       }
+
+#ifdef XEN_DEBUG
+       memset(&es, 0, sizeof(es));
+       es.dom = DOMID_SELF;
+       es.port = xi->xi_port;
+       if (xen_hypercall(sc, event_channel_op, 2, EVTCHNOP_status, &es)) {
+               printf("%s: failed to obtain status for port %d\n",
+                   sc->sc_dev.dv_xname, es.port);
+       }
+       printf("%s: port %u bound to vcpu%u",
+           sc->sc_dev.dv_xname, es.port, es.vcpu);
+       if (es.status == EVTCHNSTAT_interdomain)
+               printf(": domain %d port %u\n", es.u.interdomain.dom,
+                   es.u.interdomain.port);
+       else if (es.status == EVTCHNSTAT_unbound)
+               printf(": domain %d\n", es.u.unbound.dom);
+       else if (es.status == EVTCHNSTAT_pirq)
+               printf(": pirq %u\n", es.u.pirq);
+       else if (es.status == EVTCHNSTAT_virq)
+               printf(": virq %u\n", es.u.virq);
+       else
+               printf("\n");
+#endif
+
+       return (0);
+}
+
+int
+xen_intr_disestablish(xen_intr_handle_t xih)
+{
+       struct xen_softc *sc = xen_sc;
+       evtchn_port_t port = (evtchn_port_t)xih;
+       struct evtchn_close ec;
+       struct xen_intsrc *xi;
+
+       if ((xi = xen_lookup_intsrc(sc, port)) == NULL) {
+               DPRINTF("%s: failed to lookup an established interrupt handler "
+                   "for port %u\n", sc->sc_dev.dv_xname, port);
+               return (-1);
+       }
+
+       evcount_detach(&xi->xi_evcnt);
+
+       SLIST_REMOVE(&sc->sc_intrs, xi, xen_intsrc, xi_entry);
+
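+       /* Mask the port and clear any pending event before closing it */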
+       setbit((char *)sc->sc_ipg->evtchn_mask, xi->xi_port);
+       clrbit((char *)sc->sc_ipg->evtchn_pending, xi->xi_port);
+       membar_producer();
+
+       if (!xi->xi_noclose) {
+               ec.port = xi->xi_port;
+               if (xen_hypercall(sc, event_channel_op, 2, EVTCHNOP_close,
+                   &ec)) {
+                       DPRINTF("%s: failed to close event port %u\n",
+                           sc->sc_dev.dv_xname, xi->xi_port);
+               }
+       }
+
+       free(xi, M_DEVBUF, sizeof(*xi));
+       return (0);
+}
+
+void
+xen_intr_enable(void)
+{
+       struct xen_softc *sc = xen_sc;
+       struct xen_intsrc *xi;
+       struct evtchn_unmask eu;
+
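+       /* Unmask all established ports that aren't explicitly masked */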
+       SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry) {
+               if (!xi->xi_masked) {
+                       eu.port = xi->xi_port;
+                       if (xen_hypercall(sc, event_channel_op, 2,
+                           EVTCHNOP_unmask, &eu) ||
+                           isset((char *)sc->sc_ipg->evtchn_mask, xi->xi_port))
+                               printf("%s: unmasking port %u failed\n",
+                                   sc->sc_dev.dv_xname, xi->xi_port);
+               }
+       }
 }
diff --git sys/dev/pv/xenreg.h sys/dev/pv/xenreg.h
index ec45722..24d9fea 100644
--- sys/dev/pv/xenreg.h
+++ sys/dev/pv/xenreg.h
@@ -412,10 +412,125 @@ enum {
 #define HVM_CALLBACK_VECTOR(vector) \
        (((uint64_t)HVM_CB_TYPE_VECTOR << HVM_CB_TYPE_SHIFT) | \
         (((vector) & HVM_CB_GSI_GSI_MASK) << HVM_CB_GSI_GSI_SHIFT))
 
 
+/*
+ * interface/event_channel.h
+ *
+ * Event channels between domains.
+ */
+
+#define EVTCHNOP_bind_interdomain      0
+#define EVTCHNOP_bind_virq             1
+#define EVTCHNOP_bind_pirq             2
+#define EVTCHNOP_close                 3
+#define EVTCHNOP_send                  4
+#define EVTCHNOP_status                        5
+#define EVTCHNOP_alloc_unbound         6
+#define EVTCHNOP_bind_ipi              7
+#define EVTCHNOP_bind_vcpu             8
+#define EVTCHNOP_unmask                        9
+#define EVTCHNOP_reset                 10
+
+typedef uint32_t evtchn_port_t;
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_alloc_unbound {
+       /* IN parameters */
+       domid_t dom, remote_dom;
+       /* OUT parameters */
+       evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+struct evtchn_close {
+       /* IN parameters. */
+       evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+struct evtchn_send {
+       /* IN parameters. */
+       evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <dom> is not DOMID_SELF.
+ */
+struct evtchn_status {
+       /* IN parameters */
+       domid_t  dom;
+       evtchn_port_t port;
+       /* OUT parameters */
+#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
+#define EVTCHNSTAT_unbound      1  /* Channel is waiting interdom connection.*/
+#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
+#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
+       uint32_t status;
+       uint32_t vcpu;                 /* VCPU to which this channel is bound. */
+       union {
+               struct {
+                       domid_t dom;
+               } unbound;                 /* EVTCHNSTAT_unbound */
+               struct {
+                       domid_t dom;
+                       evtchn_port_t port;
+               } interdomain;             /* EVTCHNSTAT_interdomain */
+               uint32_t pirq;             /* EVTCHNSTAT_pirq        */
+               uint32_t virq;             /* EVTCHNSTAT_virq        */
+       } u;
+};
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ *  1. IPI-bound channels always notify the vcpu specified at bind time.
+ *     This binding cannot be changed.
+ *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
+ *     This binding cannot be changed.
+ *  3. All other channels notify vcpu0 by default. This default is set when
+ *     the channel is allocated (a port that is freed and subsequently reused
+ *     has its binding reset to vcpu0).
+ */
+struct evtchn_bind_vcpu {
+       /* IN parameters. */
+       evtchn_port_t port;
+       uint32_t vcpu;
+};
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+struct evtchn_unmask {
+       /* IN parameters. */
+       evtchn_port_t port;
+};
+
 
 /*
  * interface/features.h
  *
  * Feature flags, reported by XENVER_get_features.
diff --git sys/dev/pv/xenvar.h sys/dev/pv/xenvar.h
index 7c5d244..e58d233 100644
--- sys/dev/pv/xenvar.h
+++ sys/dev/pv/xenvar.h
@@ -23,30 +23,57 @@
 #define DPRINTF(x...)          printf(x)
 #else
 #define DPRINTF(x...)
 #endif
 
+struct xen_intsrc {
+       SLIST_ENTRY(xen_intsrc)   xi_entry;
+       void                    (*xi_handler)(void *);
+       void                     *xi_arg;
+       struct evcount            xi_evcnt;
+       evtchn_port_t             xi_port;
+       int                       xi_noclose;
+       int                       xi_masked;
+};
+
 struct xen_softc {
        struct device            sc_dev;
        uint32_t                 sc_base;
        void                    *sc_hc;
        uint32_t                 sc_features;
 #define  XENFEAT_CBVEC         (1<<8)
 
        struct shared_info      *sc_ipg;        /* HYPERVISOR_shared_info */
 
        int                      sc_cbvec;      /* callback was installed */
+       uint64_t                 sc_irq;        /* IDT vector number */
+       struct evcount           sc_evcnt;      /* upcall counter */
+       SLIST_HEAD(, xen_intsrc) sc_intrs;
 };
 
 extern struct xen_softc *xen_sc;
 
 /*
  *  Hypercalls
  */
 #define memory_op              12
 #define xen_version            17
+#define event_channel_op       32
 #define hvm_op                 34
 
 int    xen_hypercall(struct xen_softc *, int, int, ...);
 int    xen_hypercallv(struct xen_softc *, int, int, ulong *);
 
+/*
+ *  Interrupts
+ */
+typedef uint32_t xen_intr_handle_t;
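+/* Opaque to callers; currently carries the event channel port number */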
+
+void   xen_intr(void);
+void   xen_intr_ack(void);
+void   xen_intr_signal(xen_intr_handle_t);
+int    xen_intr_establish(evtchn_port_t, xen_intr_handle_t *, void (*)(void *),
+           void *, char *);
+int    xen_intr_disestablish(xen_intr_handle_t);
+void   xen_intr_enable(void);
+
 #endif /* _XENVAR_H_ */
-- 
2.6.3