From: Yi Liu <[email protected]>

This traps the guest PASID-based iotlb invalidation request and
propagates it to the host.
Intel VT-d 3.0 supports nested translation at PASID granularity. Guest
SVA support can be implemented by configuring nested translation on a
specific PASID. This is also known as dual-stage DMA translation. Under
such a configuration, the guest owns the GVA->GPA translation, which is
configured as the first stage page table on the host side for a specific
PASID, and the host owns the GPA->HPA translation. As the guest owns the
first stage translation table, piotlb invalidation requests should be
propagated to the host, since the host IOMMU will cache first level page
table related mappings during DMA address translation.

Signed-off-by: Yi Liu <[email protected]>
Signed-off-by: Yi Sun <[email protected]>
Signed-off-by: Zhenzhong Duan <[email protected]>
Reviewed-by: Eric Auger <[email protected]>
Reviewed-by: Yi Liu <[email protected]>
Reviewed-by: Michael S. Tsirkin <[email protected]>
Link: https://lore.kernel.org/qemu-devel/[email protected]
Signed-off-by: Cédric Le Goater <[email protected]>
---
 hw/i386/intel_iommu_accel.h    | 10 +++++
 hw/i386/intel_iommu_internal.h |  6 +++
 hw/i386/intel_iommu.c          | 11 ++++--
 hw/i386/intel_iommu_accel.c    | 69 ++++++++++++++++++++++++++++++++++
 4 files changed, 93 insertions(+), 3 deletions(-)

diff --git a/hw/i386/intel_iommu_accel.h b/hw/i386/intel_iommu_accel.h
index 30696765af61b8864b3577984c5d9bcd774d4212..6dec8788f18d0196550b3d0047dc5a5b29ed2c6b 100644
--- a/hw/i386/intel_iommu_accel.h
+++ b/hw/i386/intel_iommu_accel.h
@@ -17,6 +17,9 @@ bool vtd_check_hiod_accel(IntelIOMMUState *s, VTDHostIOMMUDevice *vtd_hiod,
                           Error **errp);
 VTDHostIOMMUDevice *vtd_find_hiod_iommufd(VTDAddressSpace *as);
 bool vtd_propagate_guest_pasid(VTDAddressSpace *vtd_as, Error **errp);
+void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s, uint16_t domain_id,
+                                      uint32_t pasid, hwaddr addr,
+                                      uint64_t npages, bool ih);
 #else
 static inline bool vtd_check_hiod_accel(IntelIOMMUState *s,
                                         VTDHostIOMMUDevice *vtd_hiod,
@@ -37,5 +40,12 @@ static inline bool vtd_propagate_guest_pasid(VTDAddressSpace *vtd_as,
 {
     return true;
 }
+
+static inline void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s,
+                                                    uint16_t domain_id,
+                                                    uint32_t pasid, hwaddr addr,
+                                                    uint64_t npages, bool ih)
+{
+}
 #endif
 #endif
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index e987322e93ab05db2873d550de78abca268ca8c9..a2ca79f925c22928432e4ca7e2381c4f535fbdf0 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -622,6 +622,12 @@ typedef struct VTDPASIDCacheInfo {
     uint32_t pasid;
 } VTDPASIDCacheInfo;
 
+typedef struct VTDPIOTLBInvInfo {
+    uint16_t domain_id;
+    uint32_t pasid;
+    struct iommu_hwpt_vtd_s1_invalidate *inv_data;
+} VTDPIOTLBInvInfo;
+
 /* PASID Table Related Definitions */
 #define VTD_PASID_DIR_BASE_ADDR_MASK   (~0xfffULL)
 #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index f9b80e3257b4e382fd0eb9f946f6b6691a3d09ab..2889c29102b1c487f760906e4038274491da1a44 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -2967,6 +2967,8 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
     vtd_iommu_lock(s);
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
                                 &info);
+    vtd_flush_host_piotlb_all_locked(s, domain_id, pasid, 0, (uint64_t)-1,
+                                     false);
     vtd_iommu_unlock(s);
 
     QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
@@ -2986,7 +2988,8 @@ static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
 }
 
 static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
-                                       uint32_t pasid, hwaddr addr, uint8_t am)
+                                       uint32_t pasid, hwaddr addr, uint8_t am,
+                                       bool ih)
 {
     VTDIOTLBPageInvInfo info;
 
@@ -2998,6 +3001,7 @@ static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
     vtd_iommu_lock(s);
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page_piotlb,
                                 &info);
+    vtd_flush_host_piotlb_all_locked(s, domain_id, pasid, addr, 1 << am, ih);
     vtd_iommu_unlock(s);
 
     vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, pasid);
@@ -3029,7 +3033,8 @@ static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
     case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
         am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]);
         addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]);
-        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am);
+        vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am,
+                                   VTD_INV_DESC_PIOTLB_IH(inv_desc));
         break;
 
     default:
@@ -5176,7 +5181,7 @@ static int vtd_pri_perform_implicit_invalidation(VTDAddressSpace *vtd_as,
     ret = 0;
     switch (pgtt) {
     case VTD_SM_PASID_ENTRY_FST:
-        vtd_piotlb_page_invalidate(s, domain_id, vtd_as->pasid, addr, 0);
+        vtd_piotlb_page_invalidate(s, domain_id, vtd_as->pasid, addr, 0, false);
         break;
     /* Room for other pgtt values */
     default:
diff --git a/hw/i386/intel_iommu_accel.c b/hw/i386/intel_iommu_accel.c
index 69f82e8831f5f834126b9aa1550a6da8f2cc94c5..1f068005d8fa8b2161247e1e2386db5a60dbbdc5 100644
--- a/hw/i386/intel_iommu_accel.c
+++ b/hw/i386/intel_iommu_accel.c
@@ -182,3 +182,72 @@ bool vtd_propagate_guest_pasid(VTDAddressSpace *vtd_as, Error **errp)
 
     return vtd_device_detach_iommufd(vtd_hiod, vtd_as, errp);
 }
+
+/*
+ * Per-entry callback for the s->vtd_address_spaces hash table, with
+ * VTDPIOTLBInvInfo as the filter. It propagates the piotlb invalidation
+ * to the host.
+ */
+static void vtd_flush_host_piotlb_locked(gpointer key, gpointer value,
+                                         gpointer user_data)
+{
+    VTDPIOTLBInvInfo *piotlb_info = user_data;
+    VTDAddressSpace *vtd_as = value;
+    VTDHostIOMMUDevice *vtd_hiod = vtd_find_hiod_iommufd(vtd_as);
+    VTDPASIDCacheEntry *pc_entry = &vtd_as->pasid_cache_entry;
+    uint16_t did;
+
+    if (!vtd_hiod) {
+        return;
+    }
+
+    assert(vtd_as->pasid == PCI_NO_PASID);
+
+    /* Nothing to do if there is no first stage HWPT attached */
+    if (!pc_entry->valid ||
+        !vtd_pe_pgtt_is_fst(&pc_entry->pasid_entry)) {
+        return;
+    }
+
+    did = VTD_SM_PASID_ENTRY_DID(&pc_entry->pasid_entry);
+
+    if (piotlb_info->domain_id == did && piotlb_info->pasid == PASID_0) {
+        HostIOMMUDeviceIOMMUFD *idev =
+            HOST_IOMMU_DEVICE_IOMMUFD(vtd_hiod->hiod);
+        uint32_t entry_num = 1; /* Only implement one request for simplicity */
+        Error *local_err = NULL;
+        struct iommu_hwpt_vtd_s1_invalidate *cache = piotlb_info->inv_data;
+
+        if (!iommufd_backend_invalidate_cache(idev->iommufd, vtd_as->fs_hwpt_id,
+                                              IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+                                              sizeof(*cache), &entry_num, cache,
+                                              &local_err)) {
+            /* Something went wrong in the kernel, but try to continue */
+            error_report_err(local_err);
+        }
+    }
+}
+
+void vtd_flush_host_piotlb_all_locked(IntelIOMMUState *s, uint16_t domain_id,
+                                      uint32_t pasid, hwaddr addr,
+                                      uint64_t npages, bool ih)
+{
+    struct iommu_hwpt_vtd_s1_invalidate cache_info = { 0 };
+    VTDPIOTLBInvInfo piotlb_info;
+
+    cache_info.addr = addr;
+    cache_info.npages = npages;
+    cache_info.flags = ih ? IOMMU_VTD_INV_FLAGS_LEAF : 0;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.inv_data = &cache_info;
+
+    /*
+     * Go through each vtd_as instance in s->vtd_address_spaces and find
+     * the affected host devices which need host piotlb invalidation.
+     * Piotlb invalidation should check the pasid cache per the architecture.
+     */
+    g_hash_table_foreach(s->vtd_address_spaces,
+                         vtd_flush_host_piotlb_locked, &piotlb_info);
+}
-- 
2.52.0
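
For context, a minimal sketch (not part of the patch, not to be applied) of
how one guest page-selective P-IOTLB invalidation ends up as a single VT-d
stage-1 invalidation entry handed to the host through iommufd. It simply
mirrors vtd_flush_host_piotlb_all_locked() and vtd_flush_host_piotlb_locked()
above; the GVA, address-mask and device/HWPT context are made-up example
values, and idev/vtd_as are assumed to be set up as in the patch:

/*
 * Sketch only: a guest PSI with am = 2 and the IH bit set, targeting a
 * first stage HWPT that is already attached (idev and vtd_as->fs_hwpt_id
 * come from the enclosing code in vtd_flush_host_piotlb_locked()).
 */
struct iommu_hwpt_vtd_s1_invalidate cache_info = {
    .addr   = 0x7f0000000000ULL,        /* GVA, aligned to 1 << am pages */
    .npages = 1 << 2,                   /* am = 2 -> 4 pages */
    .flags  = IOMMU_VTD_INV_FLAGS_LEAF, /* guest set the IH bit */
};
uint32_t entry_num = 1;                 /* one entry per request */
Error *local_err = NULL;

if (!iommufd_backend_invalidate_cache(idev->iommufd, vtd_as->fs_hwpt_id,
                                      IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                                      sizeof(cache_info), &entry_num,
                                      &cache_info, &local_err)) {
    error_report_err(local_err);
}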
