Re: [PATCH v2 1/5] x86/shim: map and unmap page tables in replace_va_mapping

2020-04-14 Thread Jan Beulich
On 14.04.2020 18:53, Hongyan Xia wrote:
> On Thu, 2020-04-09 at 11:42 +0200, Jan Beulich wrote:
>> On 08.04.2020 15:36, Hongyan Xia wrote:
>>> --- a/xen/arch/x86/pv/shim.c
>>> +++ b/xen/arch/x86/pv/shim.c
>>> @@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
>>>  static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
>>>                                        unsigned long va, mfn_t mfn)
>>>  {
>>> -l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
>>> -l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
>>> -l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
>>> -l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
>>> +l4_pgentry_t l4e = l4start[l4_table_offset(va)];
>>> +l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
>>> +l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
>>> +l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
>>>  struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
>>>  
>>>  put_page_and_type(page);
>>>  
>>>  *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
>>>                            : COMPAT_L1_PROT));
>>> +UNMAP_DOMAIN_PAGE(pl1e);
>>>  }
>>
>> As said before, here and below I think it should be
>> unmap_domain_page().
>>
>>> --- a/xen/include/asm-x86/page.h
>>> +++ b/xen/include/asm-x86/page.h
>>> @@ -196,6 +196,19 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>>>  #define map_l2t_from_l3e(x)    (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
>>>  #define map_l3t_from_l4e(x)    (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
>>>  
>>> +/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
>>> +#define l2e_from_l3e(l3e, offset) ({\
>>> +const l2_pgentry_t *l2t = map_l2t_from_l3e(l3e);\
>>> +l2_pgentry_t l2e = l2t[offset]; \
>>> +UNMAP_DOMAIN_PAGE(l2t); \
>>> +l2e; })
>>> +
>>> +#define l3e_from_l4e(l4e, offset) ({\
>>> +const l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);\
>>> +l3_pgentry_t l3e = l3t[offset]; \
>>> +UNMAP_DOMAIN_PAGE(l3t); \
>>> +l3e; })
>>
>> I think l1e_from_l2e() should be introduced at the same time, even
>> if for now it's unused. I also think, like we do elsewhere, that
>> macro-local variables would better have _ suffixes, to avoid
>> possible variable aliasing issues.
> 
> Shall I address the comments and send a new rev now, or is this small
> series still being reviewed?

I didn't get to look at patches 2 thru 5 yet, if this (partly)
answers the question.

Jan



Re: [PATCH v2 1/5] x86/shim: map and unmap page tables in replace_va_mapping

2020-04-14 Thread Hongyan Xia
On Thu, 2020-04-09 at 11:42 +0200, Jan Beulich wrote:
> On 08.04.2020 15:36, Hongyan Xia wrote:
> > --- a/xen/arch/x86/pv/shim.c
> > +++ b/xen/arch/x86/pv/shim.c
> > @@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
> >  static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
> >                                        unsigned long va, mfn_t mfn)
> >  {
> > -l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
> > -l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
> > -l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
> > -l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
> > +l4_pgentry_t l4e = l4start[l4_table_offset(va)];
> > +l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
> > +l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
> > +l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
> >  struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
> >  
> >  put_page_and_type(page);
> >  
> >  *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
> >                            : COMPAT_L1_PROT));
> > +UNMAP_DOMAIN_PAGE(pl1e);
> >  }
> 
> As said before, here and below I think it should be
> unmap_domain_page().
> 
> > --- a/xen/include/asm-x86/page.h
> > +++ b/xen/include/asm-x86/page.h
> > @@ -196,6 +196,19 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
> >  #define map_l2t_from_l3e(x)    (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
> >  #define map_l3t_from_l4e(x)    (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
> >  
> > +/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
> > +#define l2e_from_l3e(l3e, offset) ({\
> > +const l2_pgentry_t *l2t = map_l2t_from_l3e(l3e);\
> > +l2_pgentry_t l2e = l2t[offset]; \
> > +UNMAP_DOMAIN_PAGE(l2t); \
> > +l2e; })
> > +
> > +#define l3e_from_l4e(l4e, offset) ({\
> > +const l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);\
> > +l3_pgentry_t l3e = l3t[offset]; \
> > +UNMAP_DOMAIN_PAGE(l3t); \
> > +l3e; })
> 
> I think l1e_from_l2e() should be introduced at the same time, even
> if for now it's unused. I also think, like we do elsewhere, that
> macro-local variables would better have _ suffixes, to avoid
> possible variable aliasing issues.

Shall I address the comments and send a new rev now, or is this small
series still being reviewed?

Hongyan




Re: [PATCH v2 1/5] x86/shim: map and unmap page tables in replace_va_mapping

2020-04-09 Thread Jan Beulich
On 08.04.2020 15:36, Hongyan Xia wrote:
> --- a/xen/arch/x86/pv/shim.c
> +++ b/xen/arch/x86/pv/shim.c
> @@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
>  static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
>                                        unsigned long va, mfn_t mfn)
>  {
> -l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
> -l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
> -l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
> -l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
> +l4_pgentry_t l4e = l4start[l4_table_offset(va)];
> +l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
> +l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
> +l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
>  struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
>  
>  put_page_and_type(page);
>  
>  *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
>: COMPAT_L1_PROT));
> +UNMAP_DOMAIN_PAGE(pl1e);
>  }

As said before, here and below I think it should be unmap_domain_page().
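
[Editorial note: a minimal sketch of the distinction drawn here, assuming the
usual Xen definitions (UNMAP_DOMAIN_PAGE() also NULLs its lvalue argument after
unmapping, which buys nothing for a local pointer about to go out of scope);
hypothetical, not part of the posted patch:]

    /* Tail of replace_va_mapping() with the plain unmap, as suggested.
     * pl1e is a function-local transient mapping that goes out of scope
     * immediately afterwards, so clearing the pointer has no benefit. */
    *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
                              : COMPAT_L1_PROT));
    unmap_domain_page(pl1e);
}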

> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -196,6 +196,19 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>  #define map_l2t_from_l3e(x)    (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
>  #define map_l3t_from_l4e(x)    (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
>  
> +/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
> +#define l2e_from_l3e(l3e, offset) ({\
> +const l2_pgentry_t *l2t = map_l2t_from_l3e(l3e);\
> +l2_pgentry_t l2e = l2t[offset]; \
> +UNMAP_DOMAIN_PAGE(l2t); \
> +l2e; })
> +
> +#define l3e_from_l4e(l4e, offset) ({\
> +const l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);\
> +l3_pgentry_t l3e = l3t[offset]; \
> +UNMAP_DOMAIN_PAGE(l3t); \
> +l3e; })

I think l1e_from_l2e() should be introduced at the same time, even
if for now it's unused. I also think, like we do elsewhere, that
macro-local variables would better have _ suffixes, to avoid
possible variable aliasing issues.

Jan
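
[Editorial note: a sketch of the requested l1e_from_l2e(), following the
pattern of the two macros in this patch and using _-suffixed macro-locals as
asked for above; hypothetical, not part of the posted series:]

/* Read an L1 entry through a transient mapping of the L1 table.  The
 * trailing underscores keep the locals from aliasing caller identifiers. */
#define l1e_from_l2e(l2e_, offset_) ({                      \
    const l1_pgentry_t *l1t_ = map_l1t_from_l2e(l2e_);      \
    l1_pgentry_t l1e_ = l1t_[offset_];                      \
    unmap_domain_page(l1t_);                                \
    l1e_; })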



[PATCH v2 1/5] x86/shim: map and unmap page tables in replace_va_mapping

2020-04-08 Thread Hongyan Xia
From: Wei Liu 

Also, introduce lYe_from_lXe() macros which do not rely on the direct
map when walking page tables. Unfortunately, they cannot be inline
functions due to the header dependency on domain_page.h, so keep them as
macros just like map_lYt_from_lXe().

Signed-off-by: Wei Liu 
Signed-off-by: Hongyan Xia 

---
Changed in v2:
- unmap as early as possible instead of unmapping all at the end.
- use lYe_from_lXe() macros and lift them from a later patch to this
  patch.
- const qualify pointers in new macros.
---
 xen/arch/x86/pv/shim.c |  9 +
 xen/include/asm-x86/page.h | 13 +
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index ed2ece8a8a..28d2076065 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
 static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
   unsigned long va, mfn_t mfn)
 {
-l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
-l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
-l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
-l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+l4_pgentry_t l4e = l4start[l4_table_offset(va)];
+l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
+l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
+l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
 struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
 
 put_page_and_type(page);
 
 *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
   : COMPAT_L1_PROT));
+UNMAP_DOMAIN_PAGE(pl1e);
 }
 
 static void evtchn_reserve(struct domain *d, unsigned int port)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index c98d8f5ede..1e16f3980d 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -196,6 +196,19 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define map_l2t_from_l3e(x)    (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
 #define map_l3t_from_l4e(x)    (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
 
+/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
+#define l2e_from_l3e(l3e, offset) ({\
+const l2_pgentry_t *l2t = map_l2t_from_l3e(l3e);\
+l2_pgentry_t l2e = l2t[offset]; \
+UNMAP_DOMAIN_PAGE(l2t); \
+l2e; })
+
+#define l3e_from_l4e(l4e, offset) ({\
+const l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);\
+l3_pgentry_t l3e = l3t[offset]; \
+UNMAP_DOMAIN_PAGE(l3t); \
+l3e; })
+
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a) \
 (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
-- 
2.24.1.AMZN
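
[Editorial note: a usage sketch of the new macros, mirroring the
replace_va_mapping() hunk above: a hypothetical helper that reads the leaf
entry mapping a VA without relying on the direct map (intermediate levels are
assumed to be present, as in the hunk):]

static l1_pgentry_t __init read_l1e_for_va(const l4_pgentry_t *l4start,
                                           unsigned long va)
{
    l4_pgentry_t l4e = l4start[l4_table_offset(va)];
    l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
    l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
    l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
    l1_pgentry_t l1e = *pl1e;

    /* Only the L1 table mapping is still live at this point; drop it. */
    unmap_domain_page(pl1e);

    return l1e;
}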