This commit pulls generic init/teardown functionality out of
"p2m_init" and "p2m_teardown" into the new functions "p2m_init_one",
"p2m_teardown_one", and "p2m_flush_table". This allows our future
implementation to reuse the existing code for the initialization and
teardown of altp2m views.
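
As an illustration only, here is a minimal sketch of how a later
altp2m patch might reuse these helpers; the names "altp2m_init_one"
and "p2m_alternative" are assumptions of this sketch and are not
introduced by this patch:

    /* Hypothetical altp2m view initializer reusing p2m_init_one(). */
    static int altp2m_init_one(struct domain *d, struct p2m_domain *p2m)
    {
        int rc;

        /* Mark the view as an alternative p2m (assumed class value). */
        p2m->p2m_class = p2m_alternative;

        rc = p2m_init_one(d, p2m);
        if ( rc )
            p2m_teardown_one(p2m);

        return rc;
    }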

Signed-off-by: Sergej Proskurin <prosku...@sec.in.tum.de>
---
Cc: Stefano Stabellini <sstabell...@kernel.org>
Cc: Julien Grall <julien.gr...@arm.com>
---
v2: Added the function p2m_flush_table to the previous version.

v3: Removed struct vttbr.

    Moved define INVALID_VTTBR to p2m.h.

    Exported function prototypes of "p2m_flush_table", "p2m_init_one",
    and "p2m_teardown_one" in p2m.h.

    Extended the function "p2m_flush_table" by additionally resetting
    the fields lowest_mapped_gfn and max_mapped_gfn.

    Added a "p2m_flush_tlb" call in "p2m_flush_table". On altp2m reset
    in function "altp2m_reset", it is important to flush the TLBs after
    clearing the root table pages and before clearing the intermediate
    altp2m page tables to prevent illegal access to stalled TLB entries
    on currently active VCPUs.

    Added a check in p2m_flush_table for whether p2m->root is NULL.

    Renamed the function "p2m_free_one" to "p2m_teardown_one".

    Removed resetting p2m->vttbr in "p2m_teardown_one", as the p2m
    will be destroyed afterwards.

    Moved call to "p2m_alloc_table" back to "p2m_init_one".

    Moved the introduction of the type p2m_class_t out of this patch.

    Moved the backpointer to the struct domain out of the struct
    p2m_domain.

v4: Replaced the former use of clear_and_clean_page in p2m_flush_table
    with a routine that invalidates every p2m entry atomically. This
    avoids inconsistencies on CPUs that continue to use the views that
    are to be flushed (e.g., see altp2m_reset).

    Removed unnecessary initializations in the functions "p2m_init_one"
    and "p2m_teardown_one".

    Removed the define INVALID_VTTBR as it is not used any more.

    Cosmetic fixes.
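
    The TLB-flush ordering described in the v3 note above matters for
    callers that flush a view while vCPUs may still be running on it.
    A hypothetical caller sketch (assuming it lives where the p2m lock
    helpers of xen/arch/arm/p2m.c are visible; "altp2m_reset_one" is
    not part of this patch):

        /*
         * Hypothetical altp2m_reset-style caller: the TLB flush inside
         * p2m_flush_table() runs after the root entries are invalidated
         * and before the intermediate tables are freed, so no vCPU can
         * still walk the old tables once the pages are released.
         */
        static void altp2m_reset_one(struct p2m_domain *p2m)
        {
            p2m_write_lock(p2m);
            p2m_flush_table(p2m);
            p2m_write_unlock(p2m);
        }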
---
 xen/arch/arm/p2m.c        | 76 +++++++++++++++++++++++++++++++++++++++++++----
 xen/include/asm-arm/p2m.h |  9 ++++++
 2 files changed, 80 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 5e86368010..3a1a38e7af 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1203,27 +1203,67 @@ static void p2m_free_vmid(struct domain *d)
     spin_unlock(&vmid_alloc_lock);
 }
 
-void p2m_teardown(struct domain *d)
+/* Reset this p2m table to be empty. */
+void p2m_flush_table(struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct page_info *pg;
+    unsigned int i, j;
+    lpae_t *table;
+
+    if ( p2m->root )
+    {
+        /* Clear all concatenated root level pages. */
+        for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+        {
+            table = __map_domain_page(p2m->root + i);
+
+            for ( j = 0; j < LPAE_ENTRIES; j++ )
+            {
+                lpae_t *entry = table + j;
+
+                /*
+                 * Individual altp2m views can be flushed, whilst altp2m is
+                 * active. To avoid inconsistencies on CPUs that continue to
+                 * use the views to be flushed (e.g., see altp2m_reset), we
+                 * must remove every p2m entry atomically.
+                 */
+                p2m_remove_pte(entry, p2m->clean_pte);
+            }
+
+            unmap_domain_page(table);
+        }
+    }
+
+    /*
+     * Flush TLBs before releasing remaining intermediate p2m page tables to
+     * prevent illegal access to stale TLB entries.
+     */
+    p2m_flush_tlb(p2m);
 
+    /* Free the rest of the intermediate p2m page tables. */
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         free_domheap_page(pg);
 
+    p2m->lowest_mapped_gfn = INVALID_GFN;
+    p2m->max_mapped_gfn = _gfn(0);
+}
+
+void p2m_teardown_one(struct p2m_domain *p2m)
+{
+    p2m_flush_table(p2m);
+
     if ( p2m->root )
         free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
 
     p2m->root = NULL;
 
-    p2m_free_vmid(d);
+    p2m_free_vmid(p2m->domain);
 
     radix_tree_destroy(&p2m->mem_access_settings, NULL);
 }
 
-int p2m_init(struct domain *d)
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc = 0;
     unsigned int cpu;
 
@@ -1268,6 +1308,32 @@ int p2m_init(struct domain *d)
     return rc;
 }
 
+static void p2m_teardown_hostp2m(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m_teardown_one(p2m);
+}
+
+void p2m_teardown(struct domain *d)
+{
+    p2m_teardown_hostp2m(d);
+}
+
+static int p2m_init_hostp2m(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    p2m->p2m_class = p2m_host;
+
+    return p2m_init_one(d, p2m);
+}
+
+int p2m_init(struct domain *d)
+{
+    return p2m_init_hostp2m(d);
+}
+
 /*
  * The function will go through the p2m and remove page reference when it
  * is required. The mapping will be removed from the p2m.
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 332d74f11c..9bb38e689a 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -295,6 +295,15 @@ static inline int guest_physmap_add_page(struct domain *d,
 
 mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
 
+/* Reset the p2m to be empty and free its intermediate page tables. */
+void p2m_flush_table(struct p2m_domain *p2m);
+
+/* Initialize the p2m structure. */
+int p2m_init_one(struct domain *d, struct p2m_domain *p2m);
+
+/* Release resources held by the p2m structure. */
+void p2m_teardown_one(struct p2m_domain *p2m);
+
 /*
  * Populate-on-demand
  */
-- 
2.13.3

