This commit differentiates between flushing and destroying altp2m views. It adds the functions p2m_flush_altp2m and p2m_flush_table, which allow flushing all or individual altp2m views without destroying the entire table. In this way, altp2m views can be reused at a later point in time.
In addition, the implementation clears all altp2m entries during the
process of flushing. The same applies to hostp2m entries when the
hostp2m is destroyed. In this way, further domain and p2m allocations
will not unintentionally reuse old p2m mappings.

Signed-off-by: Sergej Proskurin <prosku...@sec.in.tum.de>
---
Cc: Stefano Stabellini <sstabell...@kernel.org>
Cc: Julien Grall <julien.gr...@arm.com>
---
 xen/arch/arm/p2m.c        | 67 +++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/p2m.h | 15 ++++++++---
 2 files changed, 78 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 4a745fd..ae789e6 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -2110,6 +2110,73 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
     return rc;
 }
 
+/* Reset this p2m table to be empty */
+static void p2m_flush_table(struct p2m_domain *p2m)
+{
+    struct page_info *top, *pg;
+    mfn_t mfn;
+    unsigned int i;
+
+    /* Check whether the p2m table has already been flushed before. */
+    if ( p2m->root == NULL )
+        return;
+
+    spin_lock(&p2m->lock);
+
+    /*
+     * "Host" p2m tables can have shared entries &c that need a bit more care
+     * when discarding them
+     */
+    ASSERT(!p2m_is_hostp2m(p2m));
+
+    /* Zap the top level of the trie */
+    top = p2m->root;
+
+    /* Clear all concatenated first level pages */
+    for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+    {
+        mfn = _mfn(page_to_mfn(top + i));
+        clear_domain_page(mfn);
+    }
+
+    /* Free the rest of the trie pages back to the paging pool */
+    while ( (pg = page_list_remove_head(&p2m->pages)) )
+        if ( pg != top )
+        {
+            /*
+             * Before freeing the individual pages, we clear them to prevent
+             * reusing old table entries in future p2m allocations.
+             */
+            mfn = _mfn(page_to_mfn(pg));
+            clear_domain_page(mfn);
+            free_domheap_page(pg);
+        }
+
+    page_list_add(top, &p2m->pages);
+
+    /* Invalidate VTTBR */
+    p2m->vttbr.vttbr = 0;
+    p2m->vttbr.vttbr_baddr = INVALID_MFN;
+
+    spin_unlock(&p2m->lock);
+}
+
+void p2m_flush_altp2m(struct domain *d)
+{
+    unsigned int i;
+
+    altp2m_lock(d);
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        flush_tlb();
+        d->arch.altp2m_vttbr[i] = INVALID_MFN;
+    }
+
+    altp2m_unlock(d);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 8ee78e0..51d784f 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -132,10 +132,7 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
 struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
 
 /* Flush all the alternate p2m's for a domain */
-static inline void p2m_flush_altp2m(struct domain *d)
-{
-    /* Not supported on ARM. */
-}
+void p2m_flush_altp2m(struct domain *d);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
@@ -289,6 +286,16 @@ static inline int get_page_and_type(struct page_info *page,
 /* get host p2m table */
 #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
 
+static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
+{
+    return p2m->p2m_class == p2m_host;
+}
+
+static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
+{
+    return p2m->p2m_class == p2m_alternate;
+}
+
 /* vm_event and mem_access are supported on any ARM guest */
 static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
 {
-- 
2.8.3

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
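For context, a rough sketch (not part of the patch) of how the new helpers might be exercised from within xen/arch/arm/p2m.c, e.g. when resetting a single view or dropping altp2m for a domain. The function names altp2m_reset_view() and altp2m_disable() are invented for illustration, and the sketch assumes the altp2m_lock()/altp2m_vttbr bookkeeping introduced earlier in this series:

/*
 * Illustrative only (hypothetical callers, not part of this patch):
 * reset a single altp2m view so its tables can be reused later, or
 * flush all views when altp2m is switched off for the domain.
 */
static int altp2m_reset_view(struct domain *d, unsigned int idx)
{
    int rc = -EINVAL;

    if ( idx >= MAX_ALTP2M )
        return rc;

    altp2m_lock(d);

    /* Only flush views that are currently in use. */
    if ( d->arch.altp2m_vttbr[idx] != INVALID_MFN )
    {
        /* Clear the view's entries; its root pages stay allocated. */
        p2m_flush_table(d->arch.altp2m_p2m[idx]);
        flush_tlb();
        d->arch.altp2m_vttbr[idx] = INVALID_MFN;
        rc = 0;
    }

    altp2m_unlock(d);

    return rc;
}

static void altp2m_disable(struct domain *d)
{
    /* Drop the contents of every view without destroying the tables. */
    p2m_flush_altp2m(d);
}

The point of the commit is visible here: unlike destroying a view, flushing merely zeroes the root pages and returns the rest of the trie to the paging pool, so a later activation (e.g. via p2m_init_altp2m_by_id) can reuse the view.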