On 25/03/15 12:31, Feng Wu wrote:
When a guest changes its interrupt configuration (such as the vector) for
direct-assigned devices, we need to update the associated IRTE with the new
guest vector, so external interrupts from the assigned devices can be
injected into the guest without a VM-Exit.

For lowest-priority interrupts, we use the vector-hashing mechanism to find
the destination vCPU. This follows the hardware behavior, since modern
Intel CPUs use vector hashing to handle lowest-priority interrupts.

For multicast/broadcast destinations, we cannot deliver via interrupt
posting, so we keep using interrupt remapping for them.
Signed-off-by: Feng Wu <feng...@intel.com>
---
xen/drivers/passthrough/io.c | 77 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 76 insertions(+), 1 deletion(-)
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index ae050df..1d9a132 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -26,6 +26,7 @@
 #include <asm/hvm/iommu.h>
 #include <asm/hvm/support.h>
 #include <xen/hvm/irq.h>
+#include <asm/io_apic.h>
 
 static DEFINE_PER_CPU(struct list_head, dpci_list);
@@ -199,6 +200,61 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
     xfree(dpci);
 }
 
+/*
+ * Here we handle the following cases:
+ * - For lowest-priority interrupts, we find the destination vCPU from the
+ *   guest vector using the vector-hashing mechanism and return true. This
+ *   follows the hardware behavior, since modern Intel CPUs use vector
+ *   hashing to handle lowest-priority interrupts.
What is the hashing algorithm, or can I have some hint as to where to
find it in a manual?
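From the loop below it appears to be simply the guest vector modulo the
number of matching destinations, i.e. (hypothetical helper, only to
illustrate my reading of the code):

    /* Sketch of the selection pi_find_dest_vcpu() performs: index the
     * set of vCPUs matching the destination with the guest vector. */
    static struct vcpu *pick_dest(struct vcpu **matched, unsigned int n,
                                  uint32_t gvec)
    {
        return n ? matched[gvec % n] : NULL;
    }

If that is indeed what current hardware does for lowest-priority delivery,
a pointer to where the SDM states so would be useful in the comment.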
+ * - Otherwise, for a single-destination interrupt, it is straightforward
+ *   to find the destination vCPU and return true.
+ * - For multicast/broadcast destinations, we cannot handle them via
+ *   interrupt posting, so return false.
+ */
+static bool_t pi_find_dest_vcpu(struct domain *d, uint8_t dest_id,
+                                uint8_t dest_mode, uint8_t deliver_mode,
+                                uint32_t gvec, struct vcpu **dest_vcpu)
+{
+    struct vcpu *v, **dest_vcpu_array;
+    unsigned int dest_vcpu_num = 0;
+    int ret;
+
+    if ( deliver_mode == dest_LowestPrio )
+        dest_vcpu_array = xzalloc_array(struct vcpu *, d->max_vcpus);
This allocation can fail, but you really should see about avoiding it
entirely, if possible.
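At a minimum the result needs checking before use; something along these
lines would at least fail safe (sketch only):

    if ( deliver_mode == dest_LowestPrio )
    {
        dest_vcpu_array = xzalloc_array(struct vcpu *, d->max_vcpus);
        if ( !dest_vcpu_array )
            return 0; /* fall back to interrupt remapping */
    }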
+
+    for_each_vcpu ( d, v )
+    {
+        if ( !vlapic_match_dest(vcpu_vlapic(v), NULL, 0,
+                                dest_id, dest_mode) )
+            continue;
+
+        if ( deliver_mode == dest_LowestPrio )
+            dest_vcpu_array[dest_vcpu_num] = v;
+        else
+            *dest_vcpu = v;
+
+        dest_vcpu_num++;
+    }
+
+    if ( deliver_mode == dest_LowestPrio )
+    {
+        if ( dest_vcpu_num != 0 )
+        {
+            *dest_vcpu = dest_vcpu_array[gvec % dest_vcpu_num];
+            ret = 1;
+        }
+        else
+            ret = 0;
+
+        xfree(dest_vcpu_array);
+        return ret;
+    }
+    else if ( dest_vcpu_num == 1 )
+        return 1;
+    else
+        return 0;
+}
+
 int pt_irq_create_bind(
     struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
 {
@@ -257,7 +313,7 @@ int pt_irq_create_bind(
     {
     case PT_IRQ_TYPE_MSI:
     {
-        uint8_t dest, dest_mode;
+        uint8_t dest, dest_mode, deliver_mode;
         int dest_vcpu_id;
 
         if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
@@ -330,11 +386,30 @@ int pt_irq_create_bind(
         /* Calculate dest_vcpu_id for MSI-type pirq migration. */
         dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
         dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+        deliver_mode = (pirq_dpci->gmsi.gflags & VMSI_DELIV_MASK) >>
+                        GFLAGS_SHIFT_DELIV_MODE;
s/deliver/delivery/
Also, you should be able to use MASK_EXTR() rather than manual shifts
and masks.
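i.e., assuming VMSI_DELIV_MASK is the in-place mask for the delivery-mode
field, the whole thing collapses to:

    deliver_mode = MASK_EXTR(pirq_dpci->gmsi.gflags, VMSI_DELIV_MASK);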
         dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
         pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
         spin_unlock(&d->event_lock);
         if ( dest_vcpu_id >= 0 )
             hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
+
+        /* Use interrupt posting if it is supported. */
+        if ( iommu_intpost )
+        {
+            struct vcpu *vcpu = NULL;
+
+            if ( !pi_find_dest_vcpu(d, dest, dest_mode, deliver_mode,
+                                    pirq_dpci->gmsi.gvec, &vcpu) )
+                break;
+
+            if ( pi_update_irte(vcpu, info, pirq_dpci->gmsi.gvec) != 0 )
+            {
+                dprintk(XENLOG_G_INFO, "failed to update PI IRTE\n");
Please put far more information in this error message.
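Something along these lines (assuming the local pirq is still in scope
here) would make the failure diagnosable:

    dprintk(XENLOG_G_WARNING,
            "d%d: failed to update PI IRTE for pirq %u, gvec %#x\n",
            d->domain_id, pirq, pirq_dpci->gmsi.gvec);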
+                return -EBUSY;
Under what circumstances can this happen? I don't think it is valid to
fail the userspace bind hypercall in this case.
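If setting up posting fails, we should arguably just stay on the remapped
path rather than error out, roughly:

    /* Posting is an optimisation; if it cannot be arranged, delivery
     * simply continues via interrupt remapping. */
    if ( pi_update_irte(vcpu, info, pirq_dpci->gmsi.gvec) )
        dprintk(XENLOG_G_WARNING, "cannot use posted format for pirq %u\n",
                pirq);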
~Andrew
+            }
+        }
+
         break;
     }