The INVALL command instructs an ITS to invalidate the configuration
data for all LPIs associated with a given redistributor (read: VCPU).
This is nasty to emulate exactly with our architecture, so we just
iterate over all mapped LPIs and filter for those from that particular
VCPU.
Signed-off-by: Andre Przywara
---
xen/arch/arm/vgic-v3-its.c | 66 ++
1 file changed, 66 insertions(+)
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 14fde83..76e0467 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -516,6 +516,69 @@ out_unlock_its:
return ret;
}
+/*
+ * INVALL updates the per-LPI configuration status for every LPI mapped to
+ * a particular redistributor.
+ * We iterate over all mapped LPIs in our radix tree and update those.
+ */
+static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
+{
+uint32_t collid = its_cmd_get_collection(cmdptr);
+struct vcpu *vcpu;
+struct pending_irq *pirqs[16];
+uint64_t vlpi = 0; /* 64-bit to catch overflows */
+unsigned int nr_lpis, i;
+unsigned long flags;
+int ret = 0;
+
+/*
+ * As this implementation walks over all mapped LPIs, it might take
+ * too long for a real guest, so we might want to revisit this
+ * implementation for DomUs.
+ * However this command is very rare, also we don't expect many
+ * LPIs to be actually mapped, so it's fine for Dom0 to use.
+ */
+ASSERT(is_hardware_domain(its->d));
+
+spin_lock(&its->its_lock);
+vcpu = get_vcpu_from_collection(its, collid);
+spin_unlock(&its->its_lock);
+
+spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
+read_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
+
+do
+{
+nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree,
+ (void **)pirqs, vlpi,
+ ARRAY_SIZE(pirqs));
+
+for ( i = 0; i < nr_lpis; i++ )
+{
+/* We only care about LPIs on our VCPU. */
+if ( pirqs[i]->lpi_vcpu_id != vcpu->vcpu_id )
+continue;
+
+vlpi = pirqs[i]->irq;
+/* If that fails for a single LPI, carry on to handle the rest. */
+ret = update_lpi_property(its->d, pirqs[i]);
+if ( !ret )
+update_lpi_vgic_status(vcpu, pirqs[i]);
+}
+/*
+ * Loop over the next gang of pending_irqs until we reached the end of
+ * a (fully populated) tree or the lookup function returns less LPIs than
+ * it has been asked for.
+ */
+} while ( (++vlpi < its->d->arch.vgic.nr_lpis) &&
+ (nr_lpis == ARRAY_SIZE(pirqs)) );
+
+read_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
+spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+return ret;
+}
+
/* Must be called with the ITS lock held. */
static int its_discard_event(struct virt_its *its,
uint32_t vdevid, uint32_t vevid)
@@ -858,6 +921,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
case GITS_CMD_INV:
ret = its_handle_inv(its, command);
break;
+case GITS_CMD_INVALL:
+ret = its_handle_invall(its, command);
+break;
case GITS_CMD_MAPC:
ret = its_handle_mapc(its, command);
break;
--
2.9.0
___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel