Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v4: New.
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -19,6 +19,7 @@ config ARM
select HAS_DEVICE_TREE
select HAS_PASSTHROUGH
select HAS_PDX
+ select IOMMU_MIXED
config ARCH_DEFCONFIG
string
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -938,7 +938,7 @@ static int construct_memop_from_reservation
return 0;
}
-#ifdef CONFIG_HAS_PASSTHROUGH
+#if defined(CONFIG_HAS_PASSTHROUGH) && !defined(CONFIG_IOMMU_MIXED)
struct get_reserved_device_memory {
struct xen_reserved_device_memory_map map;
unsigned int used_entries;
@@ -1550,7 +1550,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
-#ifdef CONFIG_HAS_PASSTHROUGH
+#if defined(CONFIG_HAS_PASSTHROUGH) && !defined(CONFIG_IOMMU_MIXED)
case XENMEM_reserved_device_memory_map:
{
struct get_reserved_device_memory grdm;
--- a/xen/drivers/passthrough/Kconfig
+++ b/xen/drivers/passthrough/Kconfig
@@ -2,6 +2,9 @@
config HAS_PASSTHROUGH
bool
+config IOMMU_MIXED
+ bool
+
if ARM
config ARM_SMMU
bool "ARM SMMUv1 and v2 driver"
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -77,9 +77,11 @@ bool_t __read_mostly amd_iommu_perdev_intremap
DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+#ifndef CONFIG_IOMMU_MIXED
DEFINE_SPINLOCK(iommu_pt_cleanup_lock);
PAGE_LIST_HEAD(iommu_pt_cleanup_list);
static struct tasklet iommu_pt_cleanup_tasklet;
+#endif
static int __init parse_iommu_param(const char *s)
{
@@ -246,7 +248,9 @@ void iommu_teardown(struct domain *d)
d->need_iommu = 0;
hd->platform_ops->teardown(d);
+#ifndef CONFIG_IOMMU_MIXED
tasklet_schedule(&iommu_pt_cleanup_tasklet);
+#endif
}
int iommu_construct(struct domain *d)
@@ -332,6 +336,7 @@ int iommu_unmap_page(struct domain *d, u
return rc;
}
+#ifndef CONFIG_IOMMU_MIXED
static void iommu_free_pagetables(unsigned long unused)
{
do {
@@ -348,6 +353,7 @@ static void iommu_free_pagetables(unsigned long unused)
tasklet_schedule_on_cpu(&iommu_pt_cleanup_tasklet,
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
+#endif
int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
unsigned int page_count)
@@ -433,12 +439,15 @@ int __init iommu_setup(void)
iommu_hwdom_passthrough ? "Passthrough" :
iommu_hwdom_strict ? "Strict" : "Relaxed");
printk("Interrupt remapping %sabled\n", iommu_intremap ? "en" : "dis");
+#ifndef CONFIG_IOMMU_MIXED
tasklet_init(&iommu_pt_cleanup_tasklet, iommu_free_pagetables, 0);
+#endif
}
return rc;
}
+#ifndef CONFIG_IOMMU_MIXED
int iommu_suspend()
{
if ( iommu_enabled )
@@ -453,27 +462,6 @@ void iommu_resume()
iommu_get_ops()->resume();
}
-int iommu_do_domctl(
- struct xen_domctl *domctl, struct domain *d,
- XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
-{
- int ret = -ENODEV;
-
- if ( !iommu_enabled )
- return -ENOSYS;
-
-#ifdef CONFIG_HAS_PCI
- ret = iommu_do_pci_domctl(domctl, d, u_domctl);
-#endif
-
-#ifdef CONFIG_HAS_DEVICE_TREE
- if ( ret == -ENODEV )
- ret = iommu_do_dt_domctl(domctl, d, u_domctl);
-#endif
-
- return ret;
-}
-
void iommu_share_p2m_table(struct domain* d)
{
if ( iommu_enabled && iommu_use_hap_pt(d) )
@@ -500,6 +488,28 @@ int iommu_get_reserved_device_memory(iom
return ops->get_reserved_device_memory(func, ctxt);
}
+#endif
+
+int iommu_do_domctl(
+ struct xen_domctl *domctl, struct domain *d,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
+{
+ int ret = -ENODEV;
+
+ if ( !iommu_enabled )
+ return -ENOSYS;
+
+#ifdef CONFIG_HAS_PCI
+ ret = iommu_do_pci_domctl(domctl, d, u_domctl);
+#endif
+
+#ifdef CONFIG_HAS_DEVICE_TREE
+ if ( ret == -ENODEV )
+ ret = iommu_do_dt_domctl(domctl, d, u_domctl);
+#endif
+
+ return ret;
+}
bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
{
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -147,7 +147,7 @@ struct iommu_ops {
int (*assign_device)(struct domain *, u8 devfn, device_t *dev, u32 flag);
int (*reassign_device)(struct domain *s, struct domain *t,
u8 devfn, device_t *dev);
-#ifdef CONFIG_HAS_PCI
+#if defined(CONFIG_HAS_PCI) && !defined(CONFIG_IOMMU_MIXED)
int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn);
int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
void (*read_msi_from_ire)(struct msi_desc *msi_desc, struct msi_msg *msg);
@@ -157,6 +157,7 @@ struct iommu_ops {
int __must_check (*map_page)(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int flags);
int __must_check (*unmap_page)(struct domain *d, unsigned long gfn);
+#ifndef CONFIG_IOMMU_MIXED
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg,
unsigned int value);
@@ -167,10 +168,11 @@ struct iommu_ops {
void (*resume)(void);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
+ int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
+#endif
int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn,
unsigned int page_count);
int __must_check (*iotlb_flush_all)(struct domain *d);
- int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
void (*dump_p2m_table)(struct domain *d);
};