The page freed from the domain should be held until the Device-TLB flush is completed. The page previously associated with the freed portion of the GPA space should not be reallocated for another purpose until the appropriate invalidations have been performed. Otherwise, the original page owner can still access the freed page through DMA.
Hold the page until the Device-TLB flush is completed:
 - Unlink the page from the original owner.
 - Remove the page from the page_list of the domain.
 - Decrease the total page count of the domain.
 - Add the page to qi_hold_page_list.

The page will be put in the Queued Invalidation (QI) interrupt handler once the Device-TLB flush is completed.

Signed-off-by: Quan Xu <quan...@intel.com>
---
 xen/drivers/passthrough/vtd/iommu.c | 35 +++++++++++++++++++++++++++++++++++
 xen/include/xen/hvm/iommu.h         |  8 ++++++++
 2 files changed, 43 insertions(+)

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index fda9a84..5c03e41 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1117,6 +1117,39 @@ static void _qi_msi_mask(struct iommu *iommu)
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
+/*
+ * The page freed from the domain should be held until the
+ * Device-TLB flush is completed. The page previously associated
+ * with the freed portion of GPA should not be reallocated for
+ * another purpose until the appropriate invalidations have been
+ * performed. Otherwise, the original page owner can still access
+ * the freed page through DMA.
+ *
+ * Hold the page until the Device-TLB flush is completed:
+ *  - Unlink the page from the original owner.
+ *  - Remove the page from the page_list of the domain.
+ *  - Decrease the total page count of the domain.
+ *  - Add the page to qi_hold_page_list.
+ *
+ * The page will be put in the Queued Invalidation (QI) interrupt
+ * handler once the Device-TLB flush is completed.
+ */
+void qi_hold_page(struct domain *d, struct page_info *pg)
+{
+    spin_lock(&d->page_alloc_lock);
+    page_set_owner(pg, NULL);
+    page_list_del(pg, &d->page_list);
+    d->tot_pages--;
+    spin_unlock(&d->page_alloc_lock);
+
+    INTEL_IOMMU_DEBUG("IOMMU: Hold on page mfn : %"PRIx64"\n",
+                      page_to_mfn(pg));
+
+    spin_lock(&qi_page_lock(d));
+    page_list_add_tail(pg, &qi_hold_page_list(d));
+    spin_unlock(&qi_page_lock(d));
+}
+
 static void _do_iommu_qi(struct iommu *iommu)
 {
     unsigned long nr_dom, i;
@@ -1449,6 +1482,8 @@ static int intel_iommu_domain_init(struct domain *d)
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+    INIT_PAGE_LIST_HEAD(&qi_hold_page_list(d));
+    spin_lock_init(&qi_page_lock(d));
 
     return 0;
 }
diff --git a/xen/include/xen/hvm/iommu.h b/xen/include/xen/hvm/iommu.h
index e40fc7b..5dc0033 100644
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -53,11 +53,15 @@ struct hvm_iommu {
     struct qi_talbe talbe;
     bool_t qi_flag;
 
+    struct page_list_head qi_hold_page_list;
+    spinlock_t qi_lock;
+
     /* Features supported by the IOMMU */
     DECLARE_BITMAP(features, IOMMU_FEAT_count);
 };
 
 void do_qi_flushing(struct domain *d);
+void qi_hold_page(struct domain *d, struct page_info *pg);
 
 #define iommu_set_feature(d, f)   set_bit((f), domain_hvm_iommu(d)->features)
 #define iommu_clear_feature(d, f) clear_bit((f), domain_hvm_iommu(d)->features)
@@ -68,5 +72,9 @@ void do_qi_flushing(struct domain *d);
     (d->arch.hvm_domain.hvm_iommu.talbe.qi_table_poll_slot)
 #define QI_FLUSHING(d) \
     (d->arch.hvm_domain.hvm_iommu.qi_flag)
+#define qi_hold_page_list(d) \
+    (d->arch.hvm_domain.hvm_iommu.qi_hold_page_list)
+#define qi_page_lock(d) \
+    (d->arch.hvm_domain.hvm_iommu.qi_lock)
 
 #endif /* __XEN_HVM_IOMMU_H__ */
-- 
1.8.3.2

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
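[Editor's note] The commit message says the held pages are put back in the QI interrupt handler once the Device-TLB flush completes, but that path is not part of this diff. The following is a minimal, hypothetical sketch of what such a release routine could look like, using the qi_hold_page_list()/qi_page_lock() accessors added by this patch; the function name qi_release_hold_pages() and the exact release call are assumptions, not the author's implementation.

/*
 * Hypothetical sketch (not part of this patch): release the pages
 * held by qi_hold_page() once the Device-TLB flush has completed,
 * e.g. called from the QI interrupt handler.
 */
static void qi_release_hold_pages(struct domain *d)
{
    struct page_info *pg;

    spin_lock(&qi_page_lock(d));
    while ( (pg = page_list_remove_head(&qi_hold_page_list(d))) != NULL )
        /*
         * qi_hold_page() already cleared the owner and unlinked the
         * page from the domain's page_list, so dropping the reference
         * here (assumed to be the last one) lets the page be freed
         * and safely reallocated.
         */
        put_page(pg);
    spin_unlock(&qi_page_lock(d));
}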