On Fri, Feb 21, 2025 at 11:35:27AM +0000, Jean-Philippe Brucker wrote:
> > +static int viommu_attach_identity_domain(struct iommu_domain *domain,
> > +                                         struct device *dev)
> > +{
> > +        int ret = 0;
> > +        struct virtio_iommu_req_attach req;
> > +        struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
> > +        struct viommu_domain *vdomain = to_viommu_domain(domain);
> > +
> > +        req = (struct virtio_iommu_req_attach) {
> > +                .head.type = VIRTIO_IOMMU_T_ATTACH,
> > +                .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
> > +                .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
> > +        };
> > +
> > +        ret = viommu_send_attach_req(vdev->viommu, dev, &req);
> > +        if (ret)
> > +                return ret;
> > +
> > +        if (vdev->vdomain) {
> > +                vdev->vdomain->nr_endpoints--;
> > +                vdomain->nr_endpoints++;
> > +                vdev->vdomain = vdomain;
>
> These two need to be unconditional
Woops yes
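
i.e. the end of the function should look something like this (untested,
just to show those two statements moved out of the conditional):

        ret = viommu_send_attach_req(vdev->viommu, dev, &req);
        if (ret)
                return ret;

        /* The old domain, if any, loses an endpoint; the new one always gains it */
        if (vdev->vdomain)
                vdev->vdomain->nr_endpoints--;
        vdomain->nr_endpoints++;
        vdev->vdomain = vdomain;

        return 0;
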
> > +static struct viommu_domain viommu_identity_domain = {
> > +        .domain = { .type = IOMMU_DOMAIN_IDENTITY,
> > +                    .ops =
> > +                            &(const struct iommu_domain_ops){
> > +                                    .attach_dev = viommu_attach_identity_domain,
> > +                            } }
> > +};
>
> nit: how about
>
> static struct viommu_domain viommu_identity_domain = {
>         .domain = {
>                 .type = IOMMU_DOMAIN_IDENTITY,
>                 .ops = &(const struct iommu_domain_ops) {
>                         .attach_dev = viommu_attach_identity_domain,
>                 },
>         },
> };
Done
> > +        /* Reserve an ID to use as the bypass domain */
> > +        if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
> > +                viommu->identity_domain_id = viommu->first_domain;
> > +                viommu->first_domain++;
> > +        } else {
> > +                /*
> > +                 * Assume the VMM is sensible and it either supports bypass on
> > +                 * all instances or no instances.
> > +                 */
>
> Maybe also a WARN_ON(!viommu_ops.identity_domain) above?
It starts working in the following patch because the core will call
viommu_domain_alloc_identity(), which can make a correct
per-device/per-viommu determination of bypass support.

Let me fold this into the next patch to make that clearer:
@@ -998,7 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
         iommu_dma_get_resv_regions(dev, head);
 }
 
-static struct iommu_ops viommu_ops;
+static const struct iommu_ops viommu_ops;
 static struct virtio_driver virtio_iommu_drv;
 
 static int viommu_match_node(struct device *dev, const void *data)
@@ -1086,8 +1086,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
         }
 }
 
-static struct iommu_ops viommu_ops = {
-        .identity_domain = &viommu_identity_domain.domain,
+static const struct iommu_ops viommu_ops = {
         .capable = viommu_capable,
         .domain_alloc_identity = viommu_domain_alloc_identity,
         .domain_alloc_paging = viommu_domain_alloc_paging,
@@ -1216,12 +1215,6 @@ static int viommu_probe(struct virtio_device *vdev)
         if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
                 viommu->identity_domain_id = viommu->first_domain;
                 viommu->first_domain++;
-        } else {
-                /*
-                 * Assume the VMM is sensible and it either supports bypass on
-                 * all instances or no instances.
-                 */
-                viommu_ops.identity_domain = NULL;
         }
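
FWIW, after folding, the per-instance decision ends up in
viommu_domain_alloc_identity(). A rough sketch (details may differ in the
actual patch; the fallback for instances without the feature, e.g. an
identity-mapped paging domain, is reduced to an error return here):

/*
 * Sketch only: decide per-viommu whether the global bypass domain can be
 * used, instead of keying the decision off a global ops->identity_domain.
 */
static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
{
        struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

        /* Only hand out the global bypass domain when this instance offers it */
        if (virtio_has_feature(vdev->viommu->vdev,
                               VIRTIO_IOMMU_F_BYPASS_CONFIG))
                return &viommu_identity_domain.domain;

        /* Otherwise a 1:1-mapped paging domain would be built instead */
        return ERR_PTR(-EOPNOTSUPP);
}
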
Jason