It appears from experiments that, on a few systems, some devices
may attempt to access memory ranges that are not RMRR regions (and
are not reported by ACPI) but are reserved in the host e820 map.
These memory addresses appear to be the targets of DMA reads by
devices. Presumably, these devices may be USB debug ports.
When a device issues such a DMA read, an EPT violation is reported
in some cases. Some particular machines do not report an EPT violation
and instead become unresponsive (for example, the Dell T5600).

This patch introduces the Xen boot option dom0_iommu_rwmem, which allows
specifying these special ranges and mapping them with the required
access rights. For this purpose the p2m type p2m_sys_rw was introduced.
For now it grants RW permissions, though experiments suggest read
permission alone is enough.

dom0_iommu_rwmem has the following format:
=<start:end>,<start:end>,...
where start and end are mfns (or pfns, since a 1:1 mapping is performed).
The number of ranges is limited to 10 and can be changed.

TODO:
 - make sure the user-defined regions do not conflict with disallowed
 I/O regions such as the IOAPIC, etc.;
 - comply with the RMRR design;
 - settle on a naming convention;
 - only applicable to VT-d for now; support for other architectures is
 an open question;

Signed-off-by: Elena Ufimtseva <elena.ufimts...@oracle.com>
---
 xen/arch/x86/domain_build.c |   49 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index a7bc2a4..1db513b 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -87,6 +87,9 @@ custom_param("dom0_mem", parse_dom0_mem);
 static unsigned int __initdata opt_dom0_max_vcpus_min = 1;
 static unsigned int __initdata opt_dom0_max_vcpus_max = UINT_MAX;
 
+static char __initdata opt_iommu_rwmem[100];
+string_param("dom0_iommu_rwmem", opt_iommu_rwmem);
+
 static void __init parse_dom0_max_vcpus(const char *s)
 {
     if (*s == '-')              /* -M */
@@ -777,6 +780,50 @@ static __init void setup_pv_physmap(struct domain *d, 
unsigned long pgtbl_pfn,
     unmap_domain_page(l4start);
 }
 
+static void __init parse_iommu_rwmem_ranges(struct domain *d, const char *s)
+{
+    struct rwmem_range *rwmem;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    unsigned int idx = 0;
+
+    const char *cur = s;
+
+    do {
+        if ( idx >= 10 )
+        {
+            printk(XENLOG_WARNING "dom0_iommu_rwmem: too many ranges 
specified.\n");
+            break;
+        }
+
+        rwmem = xzalloc(struct rwmem_range);
+        if ( !rwmem )
+            return;
+
+        rwmem->start = simple_strtoull(cur, &s, 0);
+        if ( cur == s )
+            break;
+
+        if ( *s == ':' ) {
+            rwmem->end = simple_strtoull(cur = s + 1, &s, 0);
+            if ( cur == s )
+                break;
+        }
+        else
+            rwmem->end = 0;
+
+        if ( rwmem->end >= rwmem->start ) {
+            list_add_tail(&rwmem->list, &hd->arch.rwmem_ranges);
+            idx++;
+            cur = s + 1;
+        }
+        else {
+            printk(XENLOG_WARNING "Bad rwmem range: start > end, %"PRIx64" > 
%"PRIx64"\n",
+                   rwmem->start, rwmem->end);
+            break;
+        }
+    } while ( *s == ',' );
+}
+
 int __init construct_dom0(
     struct domain *d,
     const module_t *image, unsigned long image_headroom,
@@ -1523,6 +1570,8 @@ int __init construct_dom0(
         printk(" Xen warning: dom0 kernel broken ELF: %s\n",
                elf_check_broken(&elf));
 
+    parse_iommu_rwmem_ranges(d, opt_iommu_rwmem);
+
     if ( is_pvh_domain(d) )
     {
         d->need_iommu = 1;
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Reply via email to