On 21.04.20 12:03, 'Marco Solieri' via Jailhouse wrote:
From: Luca Miccio <[email protected]>
Add functions for colored page creation and destruction and initialize
coloring on the platform.
The life of a colored page can be summarized as follows.
1. The bits in the address that are useful for defining colors are
computed, and used for all mappings. The page size used to obtain the
lower limit is assumed to be aligned with the `PAGE_SIZE` constant,
defaulting to 4 KiB, which is also used as the unit for the mapping
operation, even when consecutive pages would be possible.
2. The colored regions can then be mapped with a new paging function and
destroyed with the old one, because `paging_destroy*` acts on virtual
addresses while coloring happens on the physical ones.
`paging_create` also has to handle the remap to the root cell, e.g. when
destroying cells.
3. The colored unmap function is instead used only when destroying the
root cell mapping, since we assume that the root cell uses a 1:1 mapping
for memory regions.
Signed-off-by: Luca Miccio <[email protected]>
Signed-off-by: Marco Solieri <[email protected]>
---
hypervisor/include/jailhouse/paging.h | 11 ++
hypervisor/paging.c | 155 ++++++++++++++++++++++++++
2 files changed, 166 insertions(+)
diff --git a/hypervisor/include/jailhouse/paging.h
b/hypervisor/include/jailhouse/paging.h
index 5513c4ec..032a3a04 100644
--- a/hypervisor/include/jailhouse/paging.h
+++ b/hypervisor/include/jailhouse/paging.h
@@ -267,6 +267,17 @@ int paging_destroy(const struct paging_structures
*pg_structs,
unsigned long virt, unsigned long size,
unsigned long paging_flags);
+int paging_create_colored(const struct paging_structures *pg_structs,
+ unsigned long phys, unsigned long size,
+ unsigned long virt, unsigned long access_flags,
+ unsigned long paging_flags,
+ unsigned long *color_bitmask, bool identity_map);
+
+int paging_destroy_colored(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ unsigned long paging_flags,
+ unsigned long *color_bitmask);
+
void *paging_map_device(unsigned long phys, unsigned long size);
void paging_unmap_device(unsigned long phys, void *virt, unsigned long size);
diff --git a/hypervisor/paging.c b/hypervisor/paging.c
index 876f1521..e8f741c2 100644
--- a/hypervisor/paging.c
+++ b/hypervisor/paging.c
@@ -5,6 +5,8 @@
*
* Authors:
* Jan Kiszka <[email protected]>
+ * Luca Miccio <[email protected]> (cache coloring support)
+ * Marco Solieri <[email protected]> (cache coloring support)
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
@@ -14,6 +16,7 @@
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <jailhouse/control.h>
+#include <jailhouse/coloring.h>
#define BITS_PER_PAGE (PAGE_SIZE * 8)
@@ -438,6 +441,153 @@ int paging_destroy(const struct paging_structures *pg_structs,
return 0;
}
+/**
+ * Create or modify a colored page map.
+ * @param pg_structs Descriptor of paging structures to be used.
+ * @param phys Physical address of the region to be mapped.
+ * @param size Size of the region.
+ * @param virt Virtual address the region should be mapped to.
+ * @param access_flags Flags describing the permitted page access, see
+ * @ref PAGE_ACCESS_FLAGS.
+ * @param paging_flags Flags describing the paging mode, see @ref PAGING_FLAGS.
+ * @param color_bitmask Bitmask selecting the cache colors assigned to this
+ * mapping; physical pages are skipped forward to the next
+ * allowed color via next_colored().
+ * @param identity_map If true the mapping will be 1:1, i.e. virt is forced
+ * to track the (colored) phys on every iteration.
+ *
+ * @return 0 on success, negative error code otherwise.
+ *
+ * @note The function uses only 4 KiB page size for mapping, even where
+ * consecutive colored pages would allow a larger one.
+ *
+ * @see paging_destroy_colored
+ * @see paging_get_guest_pages
+ */
+int paging_create_colored(const struct paging_structures *pg_structs,
+ unsigned long phys, unsigned long size,
+ unsigned long virt, unsigned long access_flags,
+ unsigned long paging_flags,
+ unsigned long *color_bitmask, bool identity_map)
+{
+
+ phys &= PAGE_MASK;
+ virt &= PAGE_MASK;
+ size = PAGE_ALIGN(size);
+
+ while (size > 0) {
+ const struct paging *paging = pg_structs->root_paging;
+ page_table_t pt = pg_structs->root_table;
+ pt_entry_t pte;
+ int err;
+
+ /* Advance phys to the next page of an allowed color.
+ * NOTE(review): assumes next_colored() returns phys itself when
+ * already colored, and is monotonic — confirm in coloring.h. */
+ phys = next_colored(phys, color_bitmask);
+ if (identity_map)
+ virt = phys;
+
+ /* Walk down to the 4 KiB level, creating or splitting
+ * intermediate tables as needed. */
+ while (1) {
+ pte = paging->get_entry(pt, virt);
+ if (paging->page_size == PAGE_SIZE) {
+ /* Terminal level: install the colored page. */
+ paging->set_terminal(pte, phys, access_flags);
+ flush_pt_entry(pte, paging_flags);
+ break;
+ }
+ /* Loop until 4K page size by splitting hugepages */
+ if (paging->entry_valid(pte, PAGE_PRESENT_FLAGS)) {
+ err = split_hugepage(pg_structs->hv_paging,
+ paging, pte, virt,
+ paging_flags);
+ if (err)
+ return err;
+ pt = paging_phys2hvirt(
+ paging->get_next_pt(pte));
+ } else {
+ /* No table at this level yet — allocate one. */
+ pt = page_alloc(&mem_pool, 1);
+ if (!pt)
+ return -ENOMEM;
+
+ paging->set_next_pt(pte, paging_hvirt2phys(pt));
+ flush_pt_entry(pte, paging_flags);
+ }
+ paging++;
+ }
+ if (pg_structs == &hv_paging_structs)
+ arch_paging_flush_page_tlbs(virt);
+
+ /* paging now sits at the terminal level, so this advances by
+ * exactly PAGE_SIZE (see @note above). */
+ phys += paging->page_size;
+ virt += paging->page_size;
+ size -= paging->page_size;
+ }
+ return 0;
+}
+
Isn't paging_create(...) the same as
paging_create_colored(..., color_bitmask=full, identity_map=dont-care)?
Same for paging_destroy. This duplication of highly sensitive code must
be avoided.
+/**
+ * Destroy a colored page map.
+ * @param pg_structs Descriptor of paging structures to be used.
+ * @param virt Virtual address the region to be unmapped.
+ * @param size Size of the region.
+ * @param paging_flags Flags describing the paging mode, see @ref PAGING_FLAGS.
+ * @param color_bitmask Bitmask selecting the cache colors of the mapping;
+ * the walk skips forward to the next allowed color via
+ * next_colored().
+ *
+ * @return 0 on success, negative error code otherwise.
+ *
+ * @note Coloring is applied to the *virtual* address here, which is only
+ * meaningful for 1:1 (identity) mappings such as the root cell's —
+ * see the commit description; TODO confirm no other caller exists.
+ *
+ * @see paging_create_colored
+ */
+int paging_destroy_colored(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ unsigned long paging_flags,
+ unsigned long *color_bitmask)
+{
+ size = PAGE_ALIGN(size);
+
+ while (size > 0) {
+ const struct paging *paging = pg_structs->root_paging;
+ page_table_t pt[MAX_PAGE_TABLE_LEVELS];
+ unsigned long page_size;
+ pt_entry_t pte;
+ int n = 0;
+ int err;
+
+ /* Skip to the next colored page (identity mapping assumed,
+ * see @note above). */
+ virt = next_colored(virt, color_bitmask);
+
+ /* walk down the page table, saving intermediate tables */
+ pt[0] = pg_structs->root_table;
+ while (1) {
+ pte = paging->get_entry(pt[n], virt);
+ if (!paging->entry_valid(pte, PAGE_PRESENT_FLAGS))
+ break;
+ if (paging->get_phys(pte, virt) != INVALID_PHYS_ADDR) {
+ if (paging->page_size == PAGE_SIZE)
+ break;
+
+ /* Hugepage covering virt: split it so only the
+ * colored 4K page is removed. */
+ err = split_hugepage(pg_structs->hv_paging,
+ paging, pte, virt,
+ paging_flags);
+ if (err)
+ return err;
+ }
+ pt[++n] = paging_phys2hvirt(paging->get_next_pt(pte));
+ paging++;
+ }
+ /* advance by page size of current level paging */
+ page_size = paging->page_size ? paging->page_size : PAGE_SIZE;
+
+ /* walk up again, clearing entries, releasing empty tables */
+ while (1) {
+ paging->clear_entry(pte);
+ flush_pt_entry(pte, paging_flags);
+ if (n == 0 || !paging->page_table_empty(pt[n]))
+ break;
+ page_free(&mem_pool, pt[n], 1);
+ paging--;
+ pte = paging->get_entry(pt[--n], virt);
+ }
+ if (pg_structs == &hv_paging_structs)
+ arch_paging_flush_page_tlbs(virt);
+
+ /* Stop before size underflows: size is unsigned and page_size
+ * may exceed the remaining (page-aligned) size. */
+ if (page_size > size)
+ break;
+ virt += page_size;
+ size -= page_size;
+ }
+ return 0;
+}
+
static unsigned long
paging_gvirt2gphys(const struct guest_paging_structures *pg_structs,
unsigned long gvirt, unsigned long tmp_page,
@@ -702,6 +852,11 @@ int paging_init(void)
return err;
}
+ /* Setup coloring */
+ if (coloring_paging_init(system_config->platform_info.llc_way_size)) {
Fold the setup in here, it doesn't look arch-specific to me. Some of its
parameters may be, but that can be handled via arch includes.
+ printk("Error: Unable to init cache coloring data\n");
+ return -ENOMEM;
+ }
return 0;
}
Jan
--
Siemens AG, Corporate Technology, CT RDA IOT SES-DE
Corporate Competence Center Embedded Linux
--
You received this message because you are subscribed to the Google Groups
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
To view this discussion on the web visit
https://groups.google.com/d/msgid/jailhouse-dev/d492ee67-ee26-3533-5ca6-7c9e3e783abf%40siemens.com.