And just rely on arch_iommu_hwdom_init to set up the correct inclusive
mappings, as is done for Intel.
AMD has code in amd_iommu_hwdom_init to set up inclusive mappings up to
max_pdx; remove this since it's now a duplication of
arch_iommu_hwdom_init. Note that AMD mapped every page with a valid
mfn up to max_pdx, while arch_iommu_hwdom_init will only do so for
memory below 4GB, so this is a functional change for AMD.
Move the default setting of iommu_hwdom_{inclusive/reserved} to
arch_iommu_hwdom_init since the defaults are now the same for both
Intel and AMD.
Reported-by: Paul Durrant
Signed-off-by: Roger Pau Monné
Reviewed-by: Jan Beulich
Reviewed-by: Kevin Tian
---
Cc: Suravee Suthikulpanit
Cc: Brian Woods
Cc: Kevin Tian
Cc: Jan Beulich
Cc: Paul Durrant
---
xen/drivers/passthrough/amd/pci_amd_iommu.c | 39 -
xen/drivers/passthrough/vtd/iommu.c | 7
xen/drivers/passthrough/x86/iommu.c | 8 -
3 files changed, 7 insertions(+), 47 deletions(-)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c
b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 4a633ca940..821fe03df5 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -250,50 +250,11 @@ static int amd_iommu_add_device(u8 devfn, struct pci_dev
*pdev);
static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
{
-unsigned long i;
const struct amd_iommu *iommu;
-/* Inclusive IOMMU mappings are disabled by default on AMD hardware. */
-if ( iommu_hwdom_inclusive == -1 )
-iommu_hwdom_inclusive = 0;
-/* Reserved IOMMU mappings are disabled by default on AMD hardware. */
-if ( iommu_hwdom_reserved == -1 )
-iommu_hwdom_reserved = 0;
-
if ( allocate_domain_resources(dom_iommu(d)) )
BUG();
-if ( !iommu_hwdom_passthrough && !need_iommu(d) )
-{
-int rc = 0;
-
-/* Set up 1:1 page table for dom0 */
-for ( i = 0; i < max_pdx; i++ )
-{
-unsigned long pfn = pdx_to_pfn(i);
-
-/*
- * XXX Should we really map all non-RAM (above 4G)? Minimally
- * a pfn_valid() check would seem desirable here.
- */
-if ( mfn_valid(_mfn(pfn)) )
-{
-int ret = amd_iommu_map_page(d, pfn, pfn,
- IOMMUF_readable|IOMMUF_writable);
-
-if ( !rc )
-rc = ret;
-}
-
-if ( !(i & 0xf) )
-process_pending_softirqs();
-}
-
-if ( rc )
-AMD_IOMMU_DEBUG("d%d: IOMMU mapping failed: %d\n",
-d->domain_id, rc);
-}
-
for_each_amd_iommu ( iommu )
if ( iomem_deny_access(d, PFN_DOWN(iommu->mmio_base_phys),
PFN_DOWN(iommu->mmio_base_phys +
diff --git a/xen/drivers/passthrough/vtd/iommu.c
b/xen/drivers/passthrough/vtd/iommu.c
index bb422ec58c..cf8a80d7a1 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1304,13 +1304,6 @@ static void __hwdom_init intel_iommu_hwdom_init(struct
domain *d)
{
struct acpi_drhd_unit *drhd;
-/* Inclusive mappings are enabled by default on Intel hardware for PV. */
-if ( iommu_hwdom_inclusive == -1 )
-iommu_hwdom_inclusive = is_pv_domain(d);
-/* Reserved IOMMU mappings are enabled by default on Intel hardware. */
-if ( iommu_hwdom_reserved == -1 )
-iommu_hwdom_reserved = 1;
-
setup_hwdom_pci_devices(d, setup_hwdom_device);
setup_hwdom_rmrr(d);
/* Make sure workarounds are applied before enabling the IOMMU(s). */
diff --git a/xen/drivers/passthrough/x86/iommu.c
b/xen/drivers/passthrough/x86/iommu.c
index 69e45b8e00..2de8822c59 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -210,7 +210,13 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
BUG_ON(!is_hardware_domain(d));
-ASSERT(iommu_hwdom_inclusive != -1 && iommu_hwdom_reserved != -1);
+/* Inclusive mappings are enabled by default for PV. */
+if ( iommu_hwdom_inclusive == -1 )
+iommu_hwdom_inclusive = is_pv_domain(d);
+/* Reserved IOMMU mappings are enabled by default. */
+if ( iommu_hwdom_reserved == -1 )
+iommu_hwdom_reserved = 1;
+
if ( iommu_hwdom_inclusive && !is_pv_domain(d) )
{
printk(XENLOG_WARNING
--
2.19.0
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel