> From: Peter Xu <pet...@redhat.com>
> Sent: Wednesday, March 25, 2020 2:34 AM
> To: Liu, Yi L <yi.l....@intel.com>
> Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> invalidation to host
> 
> On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> > This patch propagates PASID-based iotlb invalidations to the host.
> >
> > Intel VT-d 3.0 supports nested translation at PASID granularity.
> > Guest SVA support can be implemented by configuring nested
> > translation for a specific PASID. This is also known as dual-stage
> > DMA translation.
> >
> > Under such a configuration, the guest owns the GVA->GPA translation,
> > which is configured as the first-level page table on the host side
> > for a specific PASID, while the host owns the GPA->HPA translation.
> > Since the guest owns the first-level translation table, PASID-based
> > iotlb (piotlb) invalidations must be propagated to the host, because
> > the host IOMMU caches first-level page table mappings during DMA
> > address translation.
> >
> > This patch traps guest PASID-based iotlb flushes and propagates them
> > to the host.
> >
> > Cc: Kevin Tian <kevin.t...@intel.com>
> > Cc: Jacob Pan <jacob.jun....@linux.intel.com>
> > Cc: Peter Xu <pet...@redhat.com>
> > Cc: Yi Sun <yi.y....@linux.intel.com>
> > Cc: Paolo Bonzini <pbonz...@redhat.com>
> > Cc: Richard Henderson <r...@twiddle.net>
> > Cc: Eduardo Habkost <ehabk...@redhat.com>
> > Signed-off-by: Liu Yi L <yi.l....@intel.com>
> > ---
> >  hw/i386/intel_iommu.c          | 139 +++++++++++++++++++++++++++++++++++++++++
> >  hw/i386/intel_iommu_internal.h |   7 +++
> >  2 files changed, 146 insertions(+)
> >
> > diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> > index b9ac07d..10d314d 100644
> > --- a/hw/i386/intel_iommu.c
> > +++ b/hw/i386/intel_iommu.c
> > @@ -3134,15 +3134,154 @@ static bool vtd_process_pasid_desc(IntelIOMMUState *s,
> >      return (ret == 0) ? true : false;
> >  }
> >
> > +/**
> > + * Caller of this function should hold iommu_lock.
> > + */
> > +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> > +                                  VTDBus *vtd_bus,
> > +                                  int devfn,
> > +                                  DualIOMMUStage1Cache *stage1_cache)
> > +{
> > +    VTDHostIOMMUContext *vtd_dev_icx;
> > +    HostIOMMUContext *host_icx;
> > +
> > +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> > +    if (!vtd_dev_icx) {
> > +        goto out;
> > +    }
> > +    host_icx = vtd_dev_icx->host_icx;
> > +    if (!host_icx) {
> > +        goto out;
> > +    }
> > +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> > +        error_report("Cache flush failed");
> 
> I think this should not easily be triggered by the guest, but just in
> case... Let's use error_report_once() to be safe.

Agreed.
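
Will switch to error_report_once() in the next version. A minimal sketch of
the call site (assuming nothing else changes around it; error_report_once()
comes from "qemu/error-report.h", which intel_iommu.c should already pull in
given the existing error_report() call):

    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
        /* Rate-limited report so a misbehaving guest cannot flood the log */
        error_report_once("%s: PASID-based iotlb flush to host failed",
                          __func__);
    }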

> > +    }
> > +out:
> > +    return;
> > +}
> > +
> > +static inline bool vtd_pasid_cache_valid(
> > +                          VTDPASIDAddressSpace *vtd_pasid_as)
> > +{
> > +    return vtd_pasid_as->iommu_state &&
> 
> This check can be dropped because always true?
> 
> If you agree with both the changes, please add:
> 
> Reviewed-by: Peter Xu <pet...@redhat.com>

I think the code should ensure that all the pasid_as entries in the hash
table are valid, and it can, since all the operations are done under the
protection of the iommu_lock.
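
If the iommu_state check is dropped on that basis, the helper would shrink
to just the generation comparison, e.g. (a sketch, assuming the hash table
never holds a vtd_pasid_as whose iommu_state is unset):

    static inline bool vtd_pasid_cache_valid(VTDPASIDAddressSpace *vtd_pasid_as)
    {
        /* Caller holds iommu_lock, so iommu_state is stable and non-NULL */
        return vtd_pasid_as->iommu_state->pasid_cache_gen ==
               vtd_pasid_as->pasid_cache_entry.pasid_cache_gen;
    }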

Thanks,
Yi Liu

> > +           (vtd_pasid_as->iommu_state->pasid_cache_gen
> > +             == vtd_pasid_as->pasid_cache_entry.pasid_cache_gen);
> > +}
> > +
> > +/**
> > + * This is the per-entry callback used when iterating the
> > + * s->vtd_pasid_as hash table, with VTDPIOTLBInvInfo as the
> > + * filter. It propagates the piotlb invalidation to the host.
> > + * Caller of this function should hold iommu_lock.
> > + */
> > +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
> > +                                  gpointer user_data)
> > +{
> > +    VTDPIOTLBInvInfo *piotlb_info = user_data;
> > +    VTDPASIDAddressSpace *vtd_pasid_as = value;
> > +    uint16_t did;
> > +
> > +    /*
> > +     * Needs to check whether the pasid entry cache stored in
> > +     * vtd_pasid_as is valid or not. "invalid" means the pasid
> > +     * cache has been flushed, thus host should have done piotlb
> > +     * invalidation together with a pasid cache invalidation, so
> > +     * no need to pass down piotlb invalidation to host for better
> > +     * performance. Only when pasid entry cache is "valid", should
> > +     * a piotlb invalidation be propagated to host since it means
> > +     * guest just modified a mapping in its page table.
> > +     */
> > +    if (!vtd_pasid_cache_valid(vtd_pasid_as)) {
> > +        return;
> > +    }
> > +
> > +    did = vtd_pe_get_domain_id(
> > +                &(vtd_pasid_as->pasid_cache_entry.pasid_entry));
> > +
> > +    if ((piotlb_info->domain_id == did) &&
> > +        (piotlb_info->pasid == vtd_pasid_as->pasid)) {
> > +        vtd_invalidate_piotlb(vtd_pasid_as->iommu_state,
> > +                              vtd_pasid_as->vtd_bus,
> > +                              vtd_pasid_as->devfn,
> > +                              piotlb_info->stage1_cache);
> > +    }
> > +
> > +    /*
> > +     * TODO: needs to add QEMU piotlb flush when QEMU piotlb
> > +     * infrastructure is ready. For now, it is enough for passthru
> > +     * devices.
> > +     */
> > +}
> > +
> >  static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
> >                                          uint16_t domain_id,
> >                                          uint32_t pasid)
> >  {
> > +    VTDPIOTLBInvInfo piotlb_info;
> > +    DualIOMMUStage1Cache *stage1_cache;
> > +    struct iommu_cache_invalidate_info *cache_info;
> > +
> > +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> > +    stage1_cache->pasid = pasid;
> > +
> > +    cache_info = &stage1_cache->cache_info;
> > +    cache_info->version = IOMMU_UAPI_VERSION;
> > +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> > +    cache_info->granularity = IOMMU_INV_GRANU_PASID;
> > +    cache_info->pasid_info.pasid = pasid;
> > +    cache_info->pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
> > +
> > +    piotlb_info.domain_id = domain_id;
> > +    piotlb_info.pasid = pasid;
> > +    piotlb_info.stage1_cache = stage1_cache;
> > +
> > +    vtd_iommu_lock(s);
> > +    /*
> > +     * Loop over all vtd_pasid_as instances in s->vtd_pasid_as to
> > +     * find the affected devices, since per the architecture a
> > +     * piotlb invalidation needs to check the pasid cache.
> > +     */
> > +    g_hash_table_foreach(s->vtd_pasid_as,
> > +                         vtd_flush_pasid_iotlb, &piotlb_info);
> > +    vtd_iommu_unlock(s);
> > +    g_free(stage1_cache);
> >  }
> >
> >  static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
> >                               uint32_t pasid, hwaddr addr, uint8_t am,
> >                               bool ih)
> >  {
> > +    VTDPIOTLBInvInfo piotlb_info;
> > +    DualIOMMUStage1Cache *stage1_cache;
> > +    struct iommu_cache_invalidate_info *cache_info;
> > +
> > +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> > +    stage1_cache->pasid = pasid;
> > +
> > +    cache_info = &stage1_cache->cache_info;
> > +    cache_info->version = IOMMU_UAPI_VERSION;
> > +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> > +    cache_info->granularity = IOMMU_INV_GRANU_ADDR;
> > +    cache_info->addr_info.flags = IOMMU_INV_ADDR_FLAGS_PASID;
> > +    cache_info->addr_info.flags |= ih ? IOMMU_INV_ADDR_FLAGS_LEAF : 0;
> > +    cache_info->addr_info.pasid = pasid;
> > +    cache_info->addr_info.addr = addr;
> > +    cache_info->addr_info.granule_size = 1 << (12 + am);
> > +    cache_info->addr_info.nb_granules = 1;
> > +
> > +    piotlb_info.domain_id = domain_id;
> > +    piotlb_info.pasid = pasid;
> > +    piotlb_info.stage1_cache = stage1_cache;
> > +
> > +    vtd_iommu_lock(s);
> > +    /*
> > +     * Loop over all vtd_pasid_as instances in s->vtd_pasid_as to
> > +     * find the affected devices, since per the architecture a
> > +     * piotlb invalidation needs to check the pasid cache.
> > +     */
> > +    g_hash_table_foreach(s->vtd_pasid_as,
> > +                         vtd_flush_pasid_iotlb, &piotlb_info);
> > +    vtd_iommu_unlock(s);
> > +    g_free(stage1_cache);
> >  }
> >
> >  static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
> > diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> > index 314e2c4..967cc4f 100644
> > --- a/hw/i386/intel_iommu_internal.h
> > +++ b/hw/i386/intel_iommu_internal.h
> > @@ -560,6 +560,13 @@ struct VTDPASIDCacheInfo {
> >                                        VTD_PASID_CACHE_DEVSI)
> >  typedef struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
> >
> > +struct VTDPIOTLBInvInfo {
> > +    uint16_t domain_id;
> > +    uint32_t pasid;
> > +    DualIOMMUStage1Cache *stage1_cache;
> > +};
> > +typedef struct VTDPIOTLBInvInfo VTDPIOTLBInvInfo;
> > +
> >  /* PASID Table Related Definitions */
> >  #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
> >  #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
> > --
> > 2.7.4
> >
> 
> --
> Peter Xu
