CAUTION: This message has originated from an External Source. Please
use proper judgment and caution when opening attachments, clicking
links, or responding to this email.
A set of functions ioremap_xxx are designed to map device memory, or to
remap part of memory temporarily for a short-time special purpose, like
using ioremap_wc to temporarily remap the guest kernel non-cacheable
while copying it to guest memory.
As virtual address translation is not supported by the MPU, and we always
follow the rule of "map in demand" in MPU, we implement the MPU version of
ioremap_xxx by mapping the memory with a transient MPU memory region.
Signed-off-by: Penny Zheng <penny.zh...@arm.com>
Signed-off-by: Wei Chen <wei.c...@arm.com>
---
v3:
- adapt to the new rule of "map in demand"
---
xen/arch/arm/include/asm/arm64/mpu.h | 4 +
xen/arch/arm/include/asm/mm.h | 6 +
xen/arch/arm/mpu/mm.c | 185 +++++++++++++++++++++++++++
3 files changed, 195 insertions(+)
diff --git a/xen/arch/arm/include/asm/arm64/mpu.h
b/xen/arch/arm/include/asm/arm64/mpu.h
index aee7947223..c5e69f239a 100644
--- a/xen/arch/arm/include/asm/arm64/mpu.h
+++ b/xen/arch/arm/include/asm/arm64/mpu.h
@@ -121,6 +121,10 @@ static inline bool region_is_valid(pr_t *pr)
return pr->prlar.reg.en;
}
+/* Check whether the MPU memory region is marked transient (prlar.tran set). */
+static inline bool region_is_transient(pr_t *pr)
+{
+    return pr->prlar.reg.tran;
+}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_MPU_H__ */
diff --git a/xen/arch/arm/include/asm/mm.h
b/xen/arch/arm/include/asm/mm.h
index cffbf8a595..0352182d99 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -227,6 +227,7 @@ void __iomem *ioremap_attr(paddr_t start, size_t
len, unsigned int attributes);
extern int map_staticmem_pages_to_xen(paddr_t start, paddr_t end);
extern int unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end);
+#ifndef CONFIG_HAS_MPU
static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
{
return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
@@ -241,6 +242,11 @@ static inline void __iomem *ioremap_wc(paddr_t
start, size_t len)
{
return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
}
+#else
+extern void __iomem *ioremap_nocache(paddr_t start, size_t len);
+extern void __iomem *ioremap_cache(paddr_t start, size_t len);
+extern void __iomem *ioremap_wc(paddr_t start, size_t len);
+#endif
/* XXX -- account for base */
#define mfn_valid(mfn)
({ \
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 9d5c1da39c..3bb1a5c7c4 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -624,6 +624,191 @@ int __init unmap_staticmem_pages_to_xen(paddr_t
start, paddr_t end)
return xen_mpumap_update(start, end, 0);
}
+/*
+ * Look up Xen's MPU memory mapping table (xen_mpumap) for a region
+ * covering the memory range [pa, pa + len).
+ *
+ * Return the index of the MPU memory region containing the whole range,
+ * or INVALID_REGION_IDX when the range is not mapped at all.
+ * Panic when the range only partially overlaps an existing region, as
+ * that case cannot be handled here.
+ */
+static uint8_t is_mm_range_mapped(paddr_t pa, paddr_t len)
+{
+    uint8_t idx;
+
+    switch ( mpumap_contain_region(xen_mpumap, max_xen_mpumap, pa,
+                                   pa + len - 1, &idx) )
+    {
+    case MPUMAP_REGION_FOUND:
+    case MPUMAP_REGION_INCLUSIVE:
+        /* The whole range lies inside region #idx. */
+        return idx;
+
+    case MPUMAP_REGION_OVERLAP:
+        panic("mpu: can not deal with overlapped MPU memory region\n");
+
+    default:
+        /* Not mapped. */
+        return INVALID_REGION_IDX;
+    }
+}
+
+/*
+ * Check whether the MPU memory region #region is configured with the
+ * page attributes #attributes: access permission (prbar.ap),
+ * execute-never (prbar.xn) and memory attribute index (prlar.ai).
+ *
+ * Return true when all three fields match, false (with a warning
+ * identifying the first mismatching field) otherwise.
+ */
+static bool is_mm_attr_match(pr_t *region, unsigned int attributes)
+{
+    if ( region->prbar.reg.ap != PAGE_AP_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region access permission does not match (0x%x -> 0x%x)\n",
+               region->prbar.reg.ap, PAGE_AP_MASK(attributes));
+        return false;
+    }
+
+    if ( region->prbar.reg.xn != PAGE_XN_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region execution permission does not match (0x%x -> 0x%x)\n",
+               region->prbar.reg.xn, PAGE_XN_MASK(attributes));
+        return false;
+    }
+
+    if ( region->prlar.reg.ai != PAGE_AI_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region memory attributes do not match (0x%x -> 0x%x)\n",
+               region->prlar.reg.ai, PAGE_AI_MASK(attributes));
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Check whether the memory range [pa, pa + len) is mapped in Xen's MPU
+ * memory mapping table xen_mpumap with memory attributes #attr.
+ *
+ * Return 0 when the range is mapped with matching attributes,
+ * -EINVAL when it is mapped but with different attributes, and
+ * -ENOENT when it is not mapped at all.
+ */
+static int is_mm_range_mapped_with_attr(paddr_t pa, paddr_t len,
+                                        unsigned int attr)
+{
+    uint8_t idx = is_mm_range_mapped(pa, len);
+
+    if ( idx == INVALID_REGION_IDX )
+        return -ENOENT;
+
+    return is_mm_attr_match(&xen_mpumap[idx], attr) ? 0 : -EINVAL;
+}
+
+/*
+ * map_mm_range shall work with unmap_mm_range to map a chunk
+ * of memory with a transient MPU memory region for a period of short
time.
+ */
+static void *map_mm_range(paddr_t pa, size_t len, unsigned int
attributes)
+{
+ if ( xen_mpumap_update(pa, pa + len, attributes | _PAGE_TRANSIENT) )
+ printk(XENLOG_ERR "Failed to map_mm_range
0x%"PRIpaddr"-0x%"PRIpaddr"\n",
+ pa, pa + len);