This patch adds IOMMU mappings for the memory regions specified via the dom0_iommu_rwmem option. These special regions are not described by ACPI as RMRRs and carry a special memory type. The need_modify_vtd_table condition that decides whether to perform the IOMMU mapping is changed to take into account the pte permission changes applied to these regions. set_typed_p2m_entry is made non-static so it can be used by the mapping routine.
See other patches in this series for details and discovery history. Signed-off-by: Elena Ufimtseva <elena.ufimts...@oracle.com> --- xen/arch/x86/mm/p2m-ept.c | 6 ++-- xen/arch/x86/mm/p2m.c | 2 +- xen/drivers/passthrough/iommu.c | 55 ++++++++++++++++++++++++++++++++++- xen/drivers/passthrough/vtd/iommu.c | 7 +++++ xen/include/asm-x86/p2m.h | 3 +- 5 files changed, 67 insertions(+), 6 deletions(-) diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c index d7d8ad1..9643183 100644 --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -730,7 +730,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, } if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) || - (p2mt == p2m_ram_paging_in) ) + (p2mt == p2m_ram_paging_in) || (p2mt == p2m_sys_rw) ) { int emt = epte_get_entry_emt(p2m->domain, gfn, mfn, i * EPT_TABLE_ORDER, &ipat, direct_mmio); @@ -750,8 +750,8 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, new_entry.mfn = mfn_x(mfn); /* Safe to read-then-write because we hold the p2m lock */ - if ( ept_entry->mfn == new_entry.mfn ) - need_modify_vtd_table = 0; + if ( ept_entry->mfn == new_entry.mfn && ept_entry->sa_p2mt == new_entry.sa_p2mt ) + need_modify_vtd_table = 0; ept_p2m_type_to_flags(&new_entry, p2mt, p2ma); } diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 6a06e9f..f9e55da 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -811,7 +811,7 @@ void p2m_change_type_range(struct domain *d, } /* Returns: 0 for success, -errno for failure */ -static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, +int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t gfn_p2mt, p2m_access_t access) { int rc = 0; diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c index cc12735..a7c7748 100644 --- a/xen/drivers/passthrough/iommu.c +++ b/xen/drivers/passthrough/iommu.c @@ -144,6 +144,59 @@ static void 
__hwdom_init check_hwdom_reqs(struct domain *d) iommu_dom0_strict = 1; } +int rwmem_change_entry_type(struct domain *d, unsigned long gfn, unsigned long nr_gfns, p2m_type_t nt) +{ + + p2m_type_t ot; + int rc = 0; + p2m_access_t a; + mfn_t omfn; + unsigned long i; + struct p2m_domain *p2m = p2m_get_hostp2m(d); + + if ( !paging_mode_translate(d) ) + return -EIO; + + i = gfn; + while ( i <= gfn + nr_gfns ) { + omfn = p2m->get_entry(p2m, i, &ot, &a, 0, NULL); + rc = set_typed_p2m_entry(d, i, omfn, nt, 0); + if ( rc != 0 ) { + printk(XENLOG_ERR "Unable to set entry for %"PRIx64"\n", i); + break; + } + i++; + } + return rc; +} + +static void rwmem_regions_map(struct domain *d) +{ + struct hvm_iommu *hd = domain_hvm_iommu(d); + struct rwmem_range *rwmem; + int rc = 0; + + list_for_each_entry ( rwmem, &hd->arch.rwmem_ranges, list ) + { + if ( rwmem->end >= rwmem->start ) + { + rc = rwmem_change_entry_type(d, rwmem->start, rwmem->end - rwmem->start, + p2m_sys_rw); + if ( rc != 0 ) { + printk(XENLOG_ERR "Could not change type for region [%"PRIx64",%"PRIx64"]\n", + rwmem->start, rwmem->end); + return; + } + } + else + { + printk(XENLOG_ERR "Bad RW memory region, %"PRIx64" > %"PRIx64"\n", + rwmem->start, rwmem->end); + return; + } +} +} + void __hwdom_init iommu_hwdom_init(struct domain *d) { struct hvm_iommu *hd = domain_hvm_iommu(d); @@ -174,7 +227,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d) process_pending_softirqs(); } } - + rwmem_regions_map(d); return hd->platform_ops->hwdom_init(d); } diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c index 2e113d7..2d0546e 100644 --- a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -1655,6 +1655,7 @@ static void iommu_domain_teardown(struct domain *d) { struct hvm_iommu *hd = domain_hvm_iommu(d); struct mapped_rmrr *mrmrr, *tmp; + struct rwmem_range *rwrange, *rwtmp; if ( list_empty(&acpi_drhd_units) ) return; @@ -1665,6 +1666,12 @@ static void 
iommu_domain_teardown(struct domain *d) xfree(mrmrr); } + list_for_each_entry_safe ( rwrange, rwtmp, &hd->arch.rwmem_ranges, list ) + { + list_del(&rwrange->list); + xfree(rwrange); + } + if ( iommu_use_hap_pt(d) ) return; diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index a95521d..5872d0a 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -527,7 +527,8 @@ void p2m_memory_type_changed(struct domain *d); int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start, unsigned long end); - +int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, + p2m_type_t gfn_p2mt, p2m_access_t access); /* Set mmio addresses in the p2m table (for pass-through) */ int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_access_t access); -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel