[PATCH v9 4/8] xen: add field "flags" to cover all internal CDF_XXX

2022-07-19 Thread Penny Zheng
With more and more CDF_xxx internal flags being introduced, and to save
space, this commit introduces a new field, "cdf", in struct domain to store
the CDF_* internal flags directly.

Another new CDF_xxx will be introduced in the next patch.
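
For illustration, here is a minimal sketch of how the consolidated field ends
up being consumed, assembled from the diff below and the follow-up patch in
this series that adds CDF_staticmem; treat it as a schematic of the intent,
not the authoritative headers:

/* Sketch: internal CDF_* creation flags now live verbatim in the domain. */
#define CDF_directmap   (1U << 1)   /* Should domain memory be directly mapped? */
#define CDF_staticmem   (1U << 2)   /* Added by the next patch in this series. */

struct domain_sketch {
    unsigned int cdf;               /* Holds the CDF_* flags passed to domain_create(). */
    /* ... other fields elided ... */
};

/* Helpers simply test bits of the single field instead of per-arch booleans. */
#define is_domain_direct_mapped(d)   ((d)->cdf & CDF_directmap)
#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)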

Signed-off-by: Penny Zheng 
Acked-by: Julien Grall 
---
v9 changes:
- no change
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- no change
---
v5 changes:
- no change
---
v4 changes:
- no change
---
v3 changes:
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- fix coding style
---
 xen/arch/arm/domain.c | 2 --
 xen/arch/arm/include/asm/domain.h | 3 +--
 xen/common/domain.c   | 3 +++
 xen/include/xen/sched.h   | 3 +++
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 2f8eaab7b5..4722988ee7 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -709,8 +709,6 @@ int arch_domain_create(struct domain *d,
 ioreq_domain_init(d);
 #endif
 
-d->arch.directmap = flags & CDF_directmap;
-
 /* p2m_init relies on some value initialized by the IOMMU subsystem */
 if ( (rc = iommu_domain_init(d, config->iommu_opts)) != 0 )
 goto fail;
diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
index cd9ce19b4b..26a8348eed 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -29,7 +29,7 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
-#define is_domain_direct_mapped(d) (d)->arch.directmap
+#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
 /*
  * Is the domain using the host memory layout?
@@ -104,7 +104,6 @@ struct arch_domain
 void *tee;
 #endif
 
-bool directmap;
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 618410e3b2..7062393e37 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -567,6 +567,9 @@ struct domain *domain_create(domid_t domid,
 /* Sort out our idea of is_system_domain(). */
 d->domain_id = domid;
 
+/* Holding CDF_* internal flags. */
+d->cdf = flags;
+
 /* Debug sanity. */
 ASSERT(is_system_domain(d) ? config == NULL : config != NULL);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b9515eb497..98e8001c89 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -596,6 +596,9 @@ struct domain
 struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
 } ioreq_server;
 #endif
+
+/* Holding CDF_* constant. Internal flags for domain creation. */
+unsigned int cdf;
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.25.1




[PATCH v9 2/8] xen: do not free reserved memory into heap

2022-07-19 Thread Penny Zheng
Pages used as guest RAM for a static domain shall be reserved to this
domain only.
So to prevent reserved pages from being used for any other purpose, they
shall not be freed back to the heap, even when the last ref gets dropped.

This commit introduces a new helper, free_domstatic_page, to free
static pages at runtime. free_staticmem_pages will now be called by it at
runtime, so drop its __init attribute.

Signed-off-by: Penny Zheng 
---
v9 changes:
- move free_domheap_page into else-condition
- regarding scrubbing static pages: neither the domain-dying case nor
opt_scrub_domheap applies to static pages
- as unowned static pages don't make it to free_domstatic_page
at the moment, remove the else-condition and add ASSERT(d) at the top of the
function
---
v8 changes:
- introduce new helper free_domstatic_page
- let put_page call free_domstatic_page for static page, when last ref
drops
- #define PGC_static zero when !CONFIG_STATIC_MEMORY, as it is used
outside page_alloc.c
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
---
v6 changes:
- adapt to PGC_static
- remove #ifdef around function declaration
---
v5 changes:
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- fix the indentation
---
v2 changes:
- new commit
---
---
 xen/arch/arm/include/asm/mm.h |  4 +++-
 xen/arch/arm/mm.c |  5 -
 xen/common/page_alloc.c   | 37 ---
 xen/include/xen/mm.h  |  7 +--
 4 files changed, 42 insertions(+), 11 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 8b2481c1f3..f1640bbda4 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,11 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
#define _PGC_static    PG_shift(3)
 #define PGC_static PG_mask(1, 3)
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 009b8cd9ef..9132fb9472 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1622,7 +1622,10 @@ void put_page(struct page_info *page)
 
 if ( unlikely((nx & PGC_count_mask) == 0) )
 {
-free_domheap_page(page);
+if ( unlikely(nx & PGC_static) )
+free_domstatic_page(page);
+else
+free_domheap_page(page);
 }
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ed56379b96..a12622e921 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,10 +151,6 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_static
-#define PGC_static 0
-#endif
-
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -2636,12 +2632,14 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+  bool need_scrub)
 {
 mfn_t mfn = page_to_mfn(pg);
 unsigned long i;
 
+spin_lock(&heap_lock);
+
 for ( i = 0; i < nr_mfns; i++ )
 {
 mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2652,9 +2650,34 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_static. */
 pg[i].count_info |= PGC_static;
 }
+
+spin_unlock(&heap_lock);
+}
+
+void free_domstatic_page(struct page_info *page)
+{
+struct domain *d = page_get_owner(page);
+bool drop_dom_ref;
+
+ASSERT(d);
+
+ASSERT_ALLOC_CONTEXT();
+
+/* NB. May recursively lock from relinquish_memory(). */
+spin_lock_recursive(&d->page_alloc_lock);
+
+arch_free_heap_page(d, page);
+
+drop_dom_ref = !domain_adjust_tot_pages(d, -1);
+
+spin_unlock_recursive(&d->page_alloc_lock);
+
+free_staticmem_pages(page, 1, scrub_debug);
+
+if ( dro
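
(The archived copy of this message is cut off above. A hedged sketch of the
complete helper, assembled from this diff and the v8 code quoted later in
this digest; the tail after the truncation point is reconstructed and may
not match the posted patch byte-for-byte.)

/* Sketch of free_domstatic_page() as introduced by this patch. */
void free_domstatic_page(struct page_info *page)
{
    struct domain *d = page_get_owner(page);
    bool drop_dom_ref;

    /* Unowned static pages never reach this path at the moment. */
    ASSERT(d);
    ASSERT_ALLOC_CONTEXT();

    /* NB. May recursively lock from relinquish_memory(). */
    spin_lock_recursive(&d->page_alloc_lock);

    arch_free_heap_page(d, page);
    drop_dom_ref = !domain_adjust_tot_pages(d, -1);

    spin_unlock_recursive(&d->page_alloc_lock);

    /* Only scrub_debug matters: dying/opt_scrub_domheap don't apply here. */
    free_staticmem_pages(page, 1, scrub_debug);

    /* Reconstructed tail: drop the domain reference if it hit zero pages. */
    if ( drop_dom_ref )
        put_domain(d);
}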

[PATCH v9 3/8] xen: do not merge reserved pages in free_heap_pages()

2022-07-19 Thread Penny Zheng
The code in free_heap_pages() will try to merge pages with the
successor/predecessor if pages are suitably aligned. So if the reserved
pages are right next to the pages given to the heap allocator,
free_heap_pages() will merge them, accidentally handing the reserved pages
over to the heap allocator as a result.

To avoid the above scenario, this commit updates free_heap_pages() to
check whether the predecessor and/or successor has PGC_static set
when trying to merge the about-to-be-freed chunk with the predecessor
and/or successor.

Suggested-by: Julien Grall 
Signed-off-by: Penny Zheng 
Reviewed-by: Jan Beulich 
Reviewed-by: Julien Grall 
---
v9 changes:
- no change
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- change PGC_reserved to adapt to PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- no changes
---
v2 changes:
- new commit
---
 xen/common/page_alloc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a12622e921..45bd88a685 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1475,6 +1475,7 @@ static void free_heap_pages(
 /* Merge with predecessor block? */
 if ( !mfn_valid(page_to_mfn(predecessor)) ||
  !page_state_is(predecessor, free) ||
+ (predecessor->count_info & PGC_static) ||
  (PFN_ORDER(predecessor) != order) ||
  (phys_to_nid(page_to_maddr(predecessor)) != node) )
 break;
@@ -1498,6 +1499,7 @@ static void free_heap_pages(
 /* Merge with successor block? */
 if ( !mfn_valid(page_to_mfn(successor)) ||
  !page_state_is(successor, free) ||
+ (successor->count_info & PGC_static) ||
  (PFN_ORDER(successor) != order) ||
  (phys_to_nid(page_to_maddr(successor)) != node) )
 break;
-- 
2.25.1




[PATCH v9 1/8] xen/arm: rename PGC_reserved to PGC_static

2022-07-19 Thread Penny Zheng
PGC_reserved is ambiguous, as the name does not tell what the pages are
reserved for, so this commit renames PGC_reserved to PGC_static, which
clearly indicates that the page is reserved for static memory.

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
Acked-by: Julien Grall 
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- rename PGC_staticmem to PGC_static
---
v5 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++---
 xen/common/page_alloc.c   | 22 +++---
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index c4bc3cd1e5..8b2481c1f3 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,9 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is reserved */
-#define _PGC_reserved PG_shift(3)
-#define PGC_reserved  PG_mask(1, 3)
+  /* Page is static memory */
+#define _PGC_static    PG_shift(3)
+#define PGC_static PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index fe0e15429a..ed56379b96 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,8 +151,8 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_reserved
-#define PGC_reserved 0
+#ifndef PGC_static
+#define PGC_static 0
 #endif
 
 /*
@@ -2286,7 +2286,7 @@ int assign_pages(
 
 for ( i = 0; i < nr; i++ )
 {
-ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
 if ( pg[i].count_info & PGC_extra )
 extra_pages++;
 }
@@ -2346,7 +2346,7 @@ int assign_pages(
 page_set_owner(&pg[i], d);
 smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
 pg[i].count_info =
-(pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+(pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
 
 page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
 }
@@ -2652,8 +2652,8 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_reserved. */
-pg[i].count_info |= PGC_reserved;
+/* In case initializing page of static memory, mark it PGC_static. */
+pg[i].count_info |= PGC_static;
 }
 }
 
@@ -2682,8 +2682,8 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 
 for ( i = 0; i < nr_mfns; i++ )
 {
-/* The page should be reserved and not yet allocated. */
-if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+/* The page should be static and not yet allocated. */
+if ( pg[i].count_info != (PGC_state_free | PGC_static) )
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
@@ -2697,10 +2697,10 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 &tlbflush_timestamp);
 
 /*
- * Preserve flag PGC_reserved and change page state
+ * Preserve flag PGC_static and change page state
  * to PGC_state_inuse.
  */
-pg[i].count_info = PGC_reserved | PGC_state_inuse;
+pg[i].count_info = PGC_static | PGC_state_inuse;
 /* Initialise fields which have other uses for free pages. */
 pg[i].u.inuse.type_info = 0;
 page_set_owner(&pg[i], NULL);
@@ -2722,7 +2722,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 
  out_err:
 while ( i-- )
-pg[i].count_info = PGC_reserved | PGC_state_free;
+pg[i].count_info = PGC_static | PGC_state_free;
 
 spin_unlock(&heap_lock);
 
-- 
2.25.1




[PATCH v9 0/8] populate/unpopulate memory when domain on static allocation

2022-07-19 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, the memory is always
handed over to the heap allocator, which is a problem if the domain is
static.
Pages used as guest RAM for a static domain shall always be reserved to this
domain only, and never be used for any other purpose, so they shall never go
back to the heap allocator.

This patch series fixes the issue by adding pages to the new list
resv_page_list, after having taken them off the "normal" list, when
unpopulating memory, and by retrieving pages from the reserved page list
(resv_page_list) when populating memory.
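
Schematically, the two runtime paths wired up by this series look as follows
(a condensed sketch; the wrapper names on_last_ref_dropped() and
populate_one_page() are illustrative only, the real code lives in put_page()
and populate_physmap()):

/* Unpopulate: when the last ref of a static page drops, keep the page for
 * this domain (it ends up on d->resv_page_list) instead of the heap. */
static void on_last_ref_dropped(struct page_info *page)
{
    if ( page->count_info & PGC_static )
        free_domstatic_page(page);
    else
        free_domheap_page(page);
}

/* Populate: a static domain re-acquires its pages from the reserved list. */
static mfn_t populate_one_page(struct domain *d, unsigned int memflags)
{
    if ( is_domain_using_staticmem(d) )
        return acquire_reserved_page(d, memflags);   /* order-0 only */
    else
    {
        /* Non-static domains keep using the heap allocator as before. */
        struct page_info *pg = alloc_domheap_pages(d, 0, memflags);

        return pg ? page_to_mfn(pg) : INVALID_MFN;
    }
}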

---
v9 changes:
- move free_domheap_page into else-condition
- regarding scrubbing static pages: neither the domain-dying case nor
opt_scrub_domheap applies to static pages
- as unowned static pages don't make it to free_domstatic_page
at the moment, remove the else-condition and add ASSERT(d) at the top of the
function
- remove macro helper put_static_page, and just expand its code inside
free_domstatic_page
- Use ASSERT_ALLOC_CONTEXT() in acquire_reserved_page
- Add free_staticmem_pages to undo prepare_staticmem_pages when
assign_domstatic_pages fails
- Remove redundant static in error message
---
v8 changes:
- introduce new helper free_domstatic_page
- let put_page call free_domstatic_page for static page, when last ref
drops
- #define PGC_static zero when !CONFIG_STATIC_MEMORY, as it is used
outside page_alloc.c
- #ifdef-ary around is_domain_using_staticmem() is not needed anymore
- order as a parameter is not needed here, as all staticmem operations are
limited to order-0 regions
- move d->page_alloc_lock after operation on d->resv_page_list
- As concurrent free/allocate could modify the resv_page_list, we still
need the lock
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
- IS_ENABLED(CONFIG_STATIC_MEMORY) would not be needed anymore
- add page on the rsv_page_list *after* it has been freed
- remove the lock, since we add the page to rsv_page_list after it has
been totally freed.
---
v6 changes:
- rename PGC_staticmem to PGC_static
- remove #ifdef around function declaration
- use domain instead of sub-systems
- move non-zero is_domain_using_staticmem() from ARM header to common
header
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
- drop the lock before returning
---
v5 changes:
- introduce three new commits
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- commit message refinement
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
- change name from "is_domain_static()" to "is_domain_using_staticmem"
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
- move #ifndef is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
- put reserved pages on resv_page_list after having taken them off
the "normal" list
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
- fix coding style

Penny Zheng (8):
  xen/arm: rename PGC_reserved to PGC_static
  xen: do not free reserved memory into heap
  xen: do not merge reserved pages in free_heap_pages()
  xen: ad

RE: [PATCH v8 9/9] xen: retrieve reserved pages on populate_physmap

2022-07-17 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Friday, July 8, 2022 9:06 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Julien Grall ; Stefano Stabellini ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v8 9/9] xen: retrieve reserved pages on
> populate_physmap
> 
> On 07.07.2022 11:22, Penny Zheng wrote:
> > --- a/xen/common/memory.c
> > +++ b/xen/common/memory.c
> > @@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
> >
> >  mfn = _mfn(gpfn);
> >  }
> > +else if ( is_domain_using_staticmem(d) )
> > +{
> > +/*
> > + * No easy way to guarantee the retrieved pages are contiguous,
> > + * so forbid non-zero-order requests here.
> > + */
> > +if ( a->extent_order != 0 )
> > +{
> > +gdprintk(XENLOG_WARNING,
> > + "Cannot allocate static order-%u pages for 
> > static %pd\n",
> > + a->extent_order, d);
> 
> I'm probably wrong in thinking that I did point out before that there's no 
> real
> reason to have "static" twice in the message. Or am I mistaken in my
> understanding that only static domains can ever have static pages?
> 

Sorry for omitting the comment, I'll only keep one static here.
You're right, only static domains can have static pages at the moment.

> > @@ -2818,6 +2805,55 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> >
> >  return 0;
> >  }
> > +
> > +/*
> > + * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> > + * then assign them to one specific domain #d.
> > + */
> > +int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> > +   unsigned int nr_mfns, unsigned int memflags)
> > +{
> > +struct page_info *pg;
> > +
> > +ASSERT_ALLOC_CONTEXT();
> > +
> > +pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
> > +if ( !pg )
> > +return -ENOENT;
> > +
> > +if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
> > +return -EINVAL;
> > +
> > +return 0;
> > +}
> > +
> > +/*
> > + * Acquire a page from reserved page list(resv_page_list), when populating
> > + * memory for static domain on runtime.
> > + */
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +struct page_info *page;
> 
> Use ASSERT_ALLOC_CONTEXT() here as well?
> 

Sure,

> > +/* Acquire a page from reserved page list(resv_page_list). */
> > +spin_lock(&d->page_alloc_lock);
> > +page = page_list_remove_head(&d->resv_page_list);
> > +spin_unlock(&d->page_alloc_lock);
> > +if ( unlikely(!page) )
> > +return INVALID_MFN;
> > +
> > +if ( !prepare_staticmem_pages(page, 1, memflags) )
> > +goto fail;
> 
> Don't you need to undo what this did if ...
> 
> > +if ( assign_domstatic_pages(d, page, 1, memflags) )
> > +goto fail;
> 
> ... this fails?

Yes, thanks for pointing that out.
free_staticmem_pages is needed to undo prepare_staticmem_pages when
assign_domstatic_pages fails.
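
Putting both review points together, the corrected helper would look roughly
like this (a sketch of the expected v9 shape, not the posted code; the error
labels are illustrative):

mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
{
    struct page_info *page;

    ASSERT_ALLOC_CONTEXT();

    /* Acquire a page from the reserved page list (resv_page_list). */
    spin_lock(&d->page_alloc_lock);
    page = page_list_remove_head(&d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);
    if ( unlikely(!page) )
        return INVALID_MFN;

    if ( !prepare_staticmem_pages(page, 1, memflags) )
        goto fail;

    if ( assign_domstatic_pages(d, page, 1, memflags) )
        goto fail_assign;

    return page_to_mfn(page);

 fail_assign:
    /* Undo prepare_staticmem_pages() before bailing out. */
    free_staticmem_pages(page, 1, false);
 fail:
    /* Put the page back on the reserved list so it isn't lost. */
    spin_lock(&d->page_alloc_lock);
    page_list_add_tail(page, &d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);

    return INVALID_MFN;
}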

> 
> Jan


RE: [PATCH v8 2/9] xen: do not free reserved memory into heap

2022-07-17 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Friday, July 8, 2022 8:48 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v8 2/9] xen: do not free reserved memory into heap
> 
> On 07.07.2022 11:22, Penny Zheng wrote:
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -1622,6 +1622,8 @@ void put_page(struct page_info *page)
> >
> >  if ( unlikely((nx & PGC_count_mask) == 0) )
> >  {
> > +if ( unlikely(nx & PGC_static) )
> > +free_domstatic_page(page);
> >  free_domheap_page(page);
> 
> Didn't you have "else" there in the proposal you made while discussing v7?
> You also don't alter free_domheap_page() to skip static pages.
> 

Yes, "else" is needed

> > @@ -2652,9 +2650,48 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> >  scrub_one_page(pg);
> >  }
> >
> > -/* In case initializing page of static memory, mark it PGC_static. */
> >  pg[i].count_info |= PGC_static;
> >  }
> > +
> > +spin_unlock(&heap_lock);
> > +}
> > +
> > +void free_domstatic_page(struct page_info *page) {
> > +struct domain *d = page_get_owner(page);
> > +bool drop_dom_ref, need_scrub;
> > +
> > +ASSERT_ALLOC_CONTEXT();
> > +
> > +if ( likely(d) )
> > +{
> > +/* NB. May recursively lock from relinquish_memory(). */
> > +spin_lock_recursive(&d->page_alloc_lock);
> > +
> > +arch_free_heap_page(d, page);
> > +
> > +/*
> > + * Normally we expect a domain to clear pages before freeing them,
> > + * if it cares about the secrecy of their contents. However, after
> > + * a domain has died we assume responsibility for erasure. We do
> > + * scrub regardless if option scrub_domheap is set.
> > + */
> > +need_scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
> 
> May I suggest that instead of copying the comment you simply add one here
> referring to the other one? Otoh I'm not sure about the "dying" case: What
> happens to a domain's static pages after its death? Isn't it that they cannot
> be re-used? If so, scrubbing is pointless. And whether the other reasons to
> scrub actually apply to static pages also isn't quite clear to me.
> 

Yes, Julien also raised the same question before, while we were discussing
how to scrub static pages asynchronously.

Right now, static memory is either reserved as guest memory or as shared memory,
neither of which can be re-used, so as you said, scrubbing is pointless at the
moment.

So here I'll only keep the scrub_debug option, as synchronous scrubbing is
already done in free_staticmem_pages.

> > +drop_dom_ref = !domain_adjust_tot_pages(d, -1);
> > +
> > +spin_unlock_recursive(&d->page_alloc_lock);
> > +}
> > +else
> > +{
> > +drop_dom_ref = false;
> > +need_scrub = true;
> > +}
> 
> Why this "else"? I can't see any way unowned pages would make it here.
> Instead you could e.g. have another ASSERT() at the top of the function.
> 

True, true. ASSERT(d) will be added

> Jan


RE: [PATCH v5 1/8] xen/arm: introduce static shared memory

2022-07-17 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, July 16, 2022 2:10 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> 
> Hi Penny,
> 
> On 29/06/2022 09:39, Penny Zheng wrote:
> >>> +for ( i = 0; i < mem->nr_banks; i++ )
> >>> +{
> >>> +/*
> >>> + * A static shared memory region could be shared between multiple
> >>> + * domains.
> >>> + */
> >>> +if ( paddr == mem->bank[i].start && size == mem->bank[i].size )
> >>> +break;
> >
> > Maybe I need to add a check on shm-id:
> > "
> >  /*
> >   * A static shared memory region could be shared between multiple
> >   * domains.
> >   */
> >  if ( strcmp(shm_id, mem->bank[i].shm_id) == 0 )
> >  {
> >  if ( paddr == mem->bank[i].start && size == mem->bank[i].size )
> >  break;
> >  else
> >  {
> >  printk("Warning: xen,shm-id %s does not match for all the nodes using the same region.\n",
> > shm_id);
> >  return -EINVAL;
> >  }
> >  }
> > "
> > Wdyt?
> 
> AFAICT, this would allow regions to overlap if they have different shm IDs. I
> am not entirely sure the rest of your code would work properly in this case
> (what if the owner is different).
> 
> So I think we need the following checks:
>1) The shm ID matches *and* the region exactly match
>2) The shm ID doesn't match and the region doesn't overlap with an
> existing one
> 

Understood, true, the overlap shall also be checked.
"
@@ -451,6 +453,31 @@ static int __init process_shm_node(const void *fdt, int node,
 return -EINVAL;
 }
 }
+else
+{
+paddr_t end = paddr + size;
+paddr_t bank_end = mem->bank[i].start + mem->bank[i].size;
+
+if ( (paddr < mem->bank[i].start && end <= mem->bank[i].start) ||
+ (paddr >= bank_end && end > bank_end) )
+{
+if ( strncmp(shm_id, mem->bank[i].shm_id,
+ MAX_SHM_ID_LENGTH) != 0 )
+break;
+else
+{
+printk("fdt: different shared memory region could not 
share the same shm ID %s\n",
+   shm_id);
+return -EINVAL;
+}
+}
+else
+{
+printk("fdt: shared memory region overlap with an existing 
entry %#"PRIpaddr" - %#"PRIpaddr"\n",
+mem->bank[i].start, bank_end);
+return -EINVAL;
+}
+}
 }

 if ( i == mem->nr_banks )
"

> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v5 1/8] xen/arm: introduce static shared memory

2022-07-12 Thread Penny Zheng
Hi Julien

Before submitting the v6 patch series, I would like to double confirm that ...

> -Original Message-
> From: Julien Grall 
> Sent: Wednesday, June 29, 2022 6:18 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> 
> 
> 
> On 29/06/2022 06:38, Penny Zheng wrote:
> > Hi Julien
> 
> Hi Penny,
> 
> >
> >> -Original Message-
> >> From: Julien Grall 
> >> Sent: Saturday, June 25, 2022 1:55 AM
> >> To: Penny Zheng ; xen-
> de...@lists.xenproject.org
> >> Cc: Wei Chen ; Stefano Stabellini
> >> ; Bertrand Marquis
> >> ; Volodymyr Babchuk
> >> 
> >> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> >>
> >> Hi Penny,
> >>
> >> On 20/06/2022 06:11, Penny Zheng wrote:
> >>> From: Penny Zheng 
> >>>
> >>> This patch serie introduces a new feature: setting up static
> >>
> >> Typo: s/serie/series/
> >>
> >>> shared memory on a dom0less system, through device tree configuration.
> >>>
> >>> This commit parses shared memory node at boot-time, and reserve it
> >>> in bootinfo.reserved_mem to avoid other use.
> >>>
> >>> This commits proposes a new Kconfig CONFIG_STATIC_SHM to wrap
> >>> static-shm-related codes, and this option depends on static memory(
> >>> CONFIG_STATIC_MEMORY). That's because that later we want to reuse a
> >>> few helpers, guarded with CONFIG_STATIC_MEMORY, like
> >>> acquire_staticmem_pages, etc, on static shared memory.
> >>>
> >>> Signed-off-by: Penny Zheng 
> >>> Reviewed-by: Stefano Stabellini 
> >>> ---
> >>> v5 change:
> >>> - no change
> >>> ---
> >>> v4 change:
> >>> - nit fix on doc
> >>> ---
> >>> v3 change:
> >>> - make nr_shm_domain unsigned int
> >>> ---
> >>> v2 change:
> >>> - document refinement
> >>> - remove bitmap and use the iteration to check
> >>> - add a new field nr_shm_domain to keep the number of shared domain
> >>> ---
> >>>docs/misc/arm/device-tree/booting.txt | 120
> >> ++
> >>>xen/arch/arm/Kconfig  |   6 ++
> >>>xen/arch/arm/bootfdt.c|  68 +++
> >>>xen/arch/arm/include/asm/setup.h  |   3 +
> >>>4 files changed, 197 insertions(+)
> >>>
> >>> diff --git a/docs/misc/arm/device-tree/booting.txt
> >>> b/docs/misc/arm/device-tree/booting.txt
> >>> index 98253414b8..6467bc5a28 100644
> >>> --- a/docs/misc/arm/device-tree/booting.txt
> >>> +++ b/docs/misc/arm/device-tree/booting.txt
> >>> @@ -378,3 +378,123 @@ device-tree:
> >>>
> >>>This will reserve a 512MB region starting at the host physical address
> >>>0x3000 to be exclusively used by DomU1.
> >>> +
> >>> +Static Shared Memory
> >>> +
> >>> +
> >>> +The static shared memory device tree nodes allow users to
> >>> +statically set up shared memory on dom0less system, enabling
> >>> +domains to do shm-based communication.
> >>> +
> >>> +- compatible
> >>> +
> >>> +"xen,domain-shared-memory-v1"
> >>> +
> >>> +- xen,shm-id
> >>> +
> >>> +An 8-bit integer that represents the unique identifier of the
> >>> + shared
> >> memory
> >>> +region. The maximum identifier shall be "xen,shm-id = <0xff>".
> >>> +
> >>> +- xen,shared-mem
> >>> +
> >>> +An array takes a physical address, which is the base address of the
> >>> +shared memory region in host physical address space, a size,
> >>> + and a
> >> guest
> >>> +physical address, as the target address of the mapping. The
> >>> + number of
> >> cells
> >>> +for the host address (and size) is the same as the guest pseudo-
> physical
> >>> +address and they are inherited from the parent node.
> >>
> >> Sorry for jump in the discussion late. But as this is going to be a
> >> stable

RE: [PATCH 2/2] automation: arm64: Create a test job for testing static allocation on qemu

2022-07-11 Thread Penny Zheng
Hi Xenia

> -Original Message-
> From: Xenia Ragiadakou 
> Sent: Monday, July 11, 2022 11:29 PM
> To: Penny Zheng ; Stefano Stabellini
> ; Julien Grall 
> Cc: xen-devel@lists.xenproject.org; Doug Goldstein 
> Subject: Re: [PATCH 2/2] automation: arm64: Create a test job for testing
> static allocation on qemu
> 
> Hi Penny,
> 
> On 11/7/22 12:02, Penny Zheng wrote:
> > Hi Xenia
> >
> >> -Original Message-
> >> From: Xen-devel  On Behalf Of
> >> Xenia Ragiadakou
> >> Sent: Friday, July 8, 2022 5:54 PM
> >> To: Stefano Stabellini ; Julien Grall
> >> 
> >> Cc: xen-devel@lists.xenproject.org; Doug Goldstein
> >> 
> >> Subject: Re: [PATCH 2/2] automation: arm64: Create a test job for
> >> testing static allocation on qemu
> >>
> >> Hi Stefano,
> >>
> >> On 7/8/22 02:05, Stefano Stabellini wrote:
> >>> On Thu, 7 Jul 2022, Julien Grall wrote:
> >>>> Hi Xenia,
> >>>>
> >>>> On 07/07/2022 21:38, Xenia Ragiadakou wrote:
> >>>>> Add an arm subdirectory under automation/configs for the arm
> >>>>> specific configs and add a config that enables static allocation.
> >>>>>
> >>>>> Modify the build script to search for configs also in this
> >>>>> subdirectory and to keep the generated xen binary, suffixed with
> >>>>> the config file name, as artifact.
> >>>>>
> >>>>> Create a test job that
> >>>>> - boots xen on qemu with a single direct mapped dom0less guest
> >>>>> configured with statically allocated memory
> >
> > Although you said booting a single direct mapped dom0less guest
> > configured with statically allocated memory here, later in code, you
> > are only enabling statically allocated memory in the ImageBuilder
> > script, missing the direct-map property.
> >
> >>>>> - verifies that the memory ranges reported in the guest's logs are
> >>>>> the same with the provided static memory regions
> >>>>>
> >>>>> For guest kernel, use the 5.9.9 kernel from the tests-artifacts
> containers.
> >>>>> Use busybox-static package, to create the guest ramdisk.
> >>>>> To generate the u-boot script, use ImageBuilder.
> >>>>> Use the qemu from the tests-artifacts containers.
> >>>>>
> >>>>> Signed-off-by: Xenia Ragiadakou 
> >>>>> ---
> >>>>> automation/configs/arm/static_mem  |   3 +
> >>>>> automation/gitlab-ci/test.yaml |  24 +
> >>>>> automation/scripts/build   |   4 +
> >>>>> automation/scripts/qemu-staticmem-arm64.sh | 114
> >> +
> >>>>> 4 files changed, 145 insertions(+)
> >>>>> create mode 100644 automation/configs/arm/static_mem
> >>>>> create mode 100755 automation/scripts/qemu-staticmem-arm64.sh
> >>>>>
> >>>>> diff --git a/automation/configs/arm/static_mem
> >>>>> b/automation/configs/arm/static_mem
> >>>>> new file mode 100644
> >>>>> index 00..84675ddf4e
> >>>>> --- /dev/null
> >>>>> +++ b/automation/configs/arm/static_mem
> >>>>> @@ -0,0 +1,3 @@
> >>>>> +CONFIG_EXPERT=y
> >>>>> +CONFIG_UNSUPPORTED=y
> >>>>> +CONFIG_STATIC_MEMORY=y
> >>>>> \ No newline at end of file
> >>>>
> >>>> Any particular reason to build a new Xen rather enable
> >>>> CONFIG_STATIC_MEMORY in the existing build?
> >>>>
> >>>>> diff --git a/automation/scripts/build b/automation/scripts/build
> >>>>> index 21b3bc57c8..9c6196d9bd 100755
> >>>>> --- a/automation/scripts/build
> >>>>> +++ b/automation/scripts/build
> >>>>> @@ -83,6 +83,7 @@ fi
> >>>>> # Build all the configs we care about
> >>>>> case ${XEN_TARGET_ARCH} in
> >>>>> x86_64) arch=x86 ;;
> >>>>> +arm64) arch=arm ;;
> >>>>> *) exit 0 ;;
> >>>>> esac
> >>>>> @@ -93,4 +94,7 @@ for cfg in `ls ${cfg_dir}`; do
> >>>>> rm -f xen/.config
> >>>>> m

RE: [PATCH 2/2] automation: arm64: Create a test job for testing static allocation on qemu

2022-07-11 Thread Penny Zheng
below, it seems to be referring to the domain memory.
> >> If so, I would suggest to comment and rename to "domu_{base, size}".
> >>
> >>> +
> >>> +set -ex
> >>> +
> >>> +apt-get -qy update
> >>> +apt-get -qy install --no-install-recommends u-boot-qemu \
> >>> +u-boot-tools \
> >>> +device-tree-compiler \
> >>> +cpio \
> >>> +curl \
> >>> +busybox-static
> >>> +
> >>> +# DomU Busybox
> >>> +cd binaries
> >>> +mkdir -p initrd
> >>> +mkdir -p initrd/bin
> >>> +mkdir -p initrd/sbin
> >>> +mkdir -p initrd/etc
> >>> +mkdir -p initrd/dev
> >>> +mkdir -p initrd/proc
> >>> +mkdir -p initrd/sys
> >>> +mkdir -p initrd/lib
> >>> +mkdir -p initrd/var
> >>> +mkdir -p initrd/mnt
> >>> +cp /bin/busybox initrd/bin/busybox
> >>> +initrd/bin/busybox --install initrd/bin echo "#!/bin/sh
> >>> +
> >>> +mount -t proc proc /proc
> >>> +mount -t sysfs sysfs /sys
> >>> +mount -t devtmpfs devtmpfs /dev
> >>> +/bin/sh" > initrd/init
> >>> +chmod +x initrd/init
> >>> +cd initrd
> >>> +find . | cpio --create --format='newc' | gzip > ../initrd.cpio.gz
> >>> +cd ../..
> >>> +
> >>> +# XXX QEMU looks for "efi-virtio.rom" even if it is unneeded curl
> >>> +-fsSLO
> >>> +https://github.com/qemu/qemu/raw/v5.2.0/pc-bios/efi-virtio.rom
> >>> +
> >>> +./binaries/qemu-system-aarch64 -nographic \
> >>> +-M virtualization=true \
> >>> +-M virt \
> >>> +-M virt,gic-version=2 \
> >>> +-cpu cortex-a57 \
> >>> +-smp 2 \
> >>> +-m 8G \
> >>> +-M dumpdtb=binaries/virt-gicv2.dtb
> >>> +
> >>> +#dtc -I dtb -O dts binaries/virt-gicv2.dtb >
> >>> +binaries/virt-gicv2.dts
> >>> +
> >>> +# ImageBuilder
> >>> +rm -rf imagebuilder
> >>> +git clone https://gitlab.com/ViryaOS/imagebuilder
> >>> +
> >>> +echo "MEMORY_START=\"0x4000\"
> >>> +MEMORY_END=\"0x02\"
> >>> +
> >>> +DEVICE_TREE=\"virt-gicv2.dtb\"
> >>> +
> >>> +XEN=\"xen-static_mem\"
> >>> +XEN_CMD=\"console=dtuart earlyprintk xsm=dummy\"
> >>
> >> AFAIK, earlyprintk is not an option for Xen on Arm (at least). It is
> >> also not clear why you need to pass xsm=dummy.
> >>
> >>> +
> >>> +NUM_DOMUS=1
> >>> +DOMU_MEM[0]=512
> >>> +DOMU_VCPUS[0]=1
> >>> +DOMU_KERNEL[0]=\"Image\"
> >>> +DOMU_RAMDISK[0]=\"initrd.cpio.gz\"
> >>> +DOMU_CMD[0]=\"earlyprintk console=ttyAMA0\"
> >>> +DOMU_STATIC_MEM[0]=\"${base[0]} ${size[0]} ${base[1]} ${size[1]}\"
> >>> +

You would want to add DOMU_DIRECT_MAP[0] = 1 to enable direct-map.

> >>> +UBOOT_SOURCE=\"boot.source\"
> >>> +UBOOT_SCRIPT=\"boot.scr\"" > binaries/imagebuilder_config
> >>> +
> >>> +bash imagebuilder/scripts/uboot-script-gen -t tftp -d binaries/ -c
> >>> binaries/imagebuilder_config
> >>> +
> >>> +# Run the test
> >>> +rm -f qemu-staticmem.serial
> >>> +set +e
> >>> +echo "  virtio scan; dhcp; tftpb 0x4000 boot.scr; source
> >>> +0x4000"| \ timeout -k 1 60 ./binaries/qemu-system-aarch64 -
> nographic \
> >>> +-M virtualization=true \
> >>> +-M virt \
> >>> +-M virt,gic-version=2 \
> >>> +-cpu cortex-a57 \
> >>> +-smp 2 \
> >>> +-m 8G \
> >>> +-no-reboot \
> >>> +-device virtio-net-pci,netdev=vnet0 -netdev
> >>> +user,id=vnet0,tftp=binaries
> >>> \
> >>> +-bios /usr/lib/u-boot/qemu_arm64/u-boot.bin \
> >>> +-dtb ./binaries/virt-gicv2.dtb \
> >>> +|& tee qemu-staticmem.serial
> >>> +
> >>> +set -e
> >>
> >> A lot of the code above is duplicated from qemu-smoke-arm64.sh. I
> >> think it would be better to consolidate in a single script. Looking
> >> briefly throught the existing scripts, it looks like it is possible
> >> to pass arguments (see qemu-smoke-x86-64.sh).
> >
> > One idea would be to make the script common and "source" a second
> > script or config file with just the ImageBuilder configuration because
> > it looks like it is the only thing different.
> >
> >
> >>> +
> >>> +(grep -q "Xen dom0less mode detected" qemu-staticmem.serial) ||
> >>> +exit 1
> >>> +
> >>> +for ((i=0; i<${#base[@]}; i++))
> >>> +do
> >>> +start="$(printf "0x%016x" ${base[$i]})"
> >>> +end="$(printf "0x%016x" $((${base[$i]} + ${size[$i]} - 1)))"
> >>> +grep -q "node   0: \[mem ${start}-${end}\]" qemu-staticmem.serial
> >>> +if test $? -eq 1
> >>> +then
> >>> +exit 1
> >>> +fi
> >>> +done
> >>
> >> Please add a comment on top to explain what this is meant to do.
> >> However, I think we should avoid relying on output that we have
> >> written ourself. IOW, relying on Xen/Linux to always write the same
> >> message is risky because they can change at any time.
> >
> > Especially if we make the script common, then we could just rely on
> > the existing check to see if the guest started correctly (no special
> > check for static memory).
> 
> In this case, how will the test verify that the static memory configuration
> has been taken into account and has not just been ignored?
> 

If only statically allocated memory is enabled, the guest memory address will
still be mapped to GUEST_RAM_BASE(0x4000).

> >>> +
> >>> +(grep -q "BusyBox" qemu-staticmem.serial) || exit 1
> >
> 
> --
> Xenia

---
Cheers,
Penny Zheng






RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest device tree

2022-07-11 Thread Penny Zheng
Hi Stefano

> -Original Message-
> From: Stefano Stabellini 
> Sent: Saturday, July 9, 2022 12:41 AM
> To: Penny Zheng 
> Cc: Stefano Stabellini ; jul...@xen.org; xen-
> de...@lists.xenproject.org; Wei Chen ; Bertrand
> Marquis ; Volodymyr Babchuk
> 
> Subject: RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest
> device tree
> 
> On Thu, 7 Jul 2022, Penny Zheng wrote:
> > Hi Stefano and julien
> >
> > > -Original Message-
> > > From: Stefano Stabellini 
> > > Sent: Thursday, July 7, 2022 7:53 AM
> > > To: Penny Zheng 
> > > Cc: Stefano Stabellini ; Julien Grall
> > > ; xen-devel@lists.xenproject.org; Wei Chen
> > > ; Bertrand Marquis
> ;
> > > Volodymyr Babchuk 
> > > Subject: RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in
> > > guest device tree
> > >
> > > On Mon, 4 Jul 2022, Penny Zheng wrote:
> > > > Hi Stefano and Julien
> > > >
> > > > > -Original Message-
> > > > > From: Stefano Stabellini 
> > > > > Sent: Saturday, June 25, 2022 5:57 AM
> > > > > To: Julien Grall 
> > > > > Cc: Penny Zheng ;
> > > > > xen-devel@lists.xenproject.org; Wei Chen ;
> > > Stefano
> > > > > Stabellini ; Bertrand Marquis
> > > > > ; Volodymyr Babchuk
> > > > > 
> > > > > Subject: Re: [PATCH v5 7/8] xen/arm: create shared memory nodes
> > > > > in guest device tree
> > > > >
> > > > > On Fri, 24 Jun 2022, Julien Grall wrote:
> > > > > > On 20/06/2022 06:11, Penny Zheng wrote:
> > > > > > > We expose the shared memory to the domU using the
> > > > > > > "xen,shared-
> > > > > memory-v1"
> > > > > > > reserved-memory binding. See
> > > > > > > Documentation/devicetree/bindings/reserved-
> memory/xen,shared
> > > > > > > -
> > > > > memory.
> > > > > > > txt in Linux for the corresponding device tree binding.
> > > > > > >
> > > > > > > To save the cost of re-parsing shared memory device tree
> > > > > > > configuration when creating shared memory nodes in guest
> > > > > > > device tree, this commit adds new field "shm_mem" to store
> > > > > > > shm-info per domain.
> > > > > > >
> > > > > > > For each shared memory region, a range is exposed under the
> > > > > > > /reserved-memory node as a child node. Each range sub-node
> > > > > > > is named xen-shmem@ and has the following
> properties:
> > > > > > > - compatible:
> > > > > > >  compatible = "xen,shared-memory-v1"
> > > > > > > - reg:
> > > > > > >  the base guest physical address and size of the
> > > > > > > shared memory region
> > > > > > > - xen,id:
> > > > > > >  a string that identifies the shared memory region.
> > > > > > >
> > > > > > > Signed-off-by: Penny Zheng 
> > > > > > > Reviewed-by: Stefano Stabellini 
> > > > > > > ---
> > > > > > > v5 change:
> > > > > > > - no change
> > > > > > > ---
> > > > > > > v4 change:
> > > > > > > - no change
> > > > > > > ---
> > > > > > > v3 change:
> > > > > > > - move field "shm_mem" to kernel_info
> > > > > > > ---
> > > > > > > v2 change:
> > > > > > > - using xzalloc
> > > > > > > - shm_id should be uint8_t
> > > > > > > - make reg a local variable
> > > > > > > - add #address-cells and #size-cells properties
> > > > > > > - fix alignment
> > > > > > > ---
> > > > > > >   xen/arch/arm/domain_build.c   | 143
> > > > > +-
> > > > > > >   xen/arch/arm/include/asm/kernel.h |   1 +
> > > > > > >   xen/arch/arm/include/asm/setup.h  |   1 +
> > > > > > >   3 files changed, 143 insertions(+), 2 deletions(-)
> > > > > > >
> > > > > > > di

[PATCH v8 8/9] xen: introduce prepare_staticmem_pages

2022-07-07 Thread Penny Zheng
Later, we want to use acquire_domstatic_pages() for populating memory
for a static domain at runtime. However, it does a lot of pointless work
(checking mfn_valid(), scrubbing the free part, cleaning the cache...)
considering we already know the pages are valid and belong to the guest.

This commit splits acquire_staticmem_pages() into two parts, and
introduces prepare_staticmem_pages to bypass all the "pointless work".
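
Schematically, the resulting split looks as follows (a summary sketch of the
intent; the authoritative code is in the diff below):

/* Sketch: acquire_staticmem_pages() after the split. */
static struct page_info *acquire_staticmem_pages_sketch(mfn_t smfn,
                                                        unsigned long nr_mfns,
                                                        unsigned int memflags)
{
    unsigned long i;
    struct page_info *pg;

    ASSERT(nr_mfns);

    /* Boot-time-only validity checks, skipped by the runtime path. */
    for ( i = 0; i < nr_mfns; i++ )
        if ( !mfn_valid(mfn_add(smfn, i)) )
            return NULL;

    pg = mfn_to_page(smfn);

    /* Reusable part: flip the pages to "in use" under the heap lock. */
    if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
        return NULL;

    /* Boot-time-only cache maintenance before handing pages to the guest. */
    for ( i = 0; i < nr_mfns; i++ )
        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));

    return pg;
}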

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 -
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b01272a59a..6112f6a3ed 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2702,26 +2702,13 @@ void free_domstatic_page(struct page_info *page)
 put_domain(d);
 }
 
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+   unsigned long nr_mfns,
+   unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
 unsigned long i;
-struct page_info *pg;
-
-ASSERT(nr_mfns);
-for ( i = 0; i < nr_mfns; i++ )
-if ( !mfn_valid(mfn_add(smfn, i)) )
-return NULL;
-
-pg = mfn_to_page(smfn);
 
 spin_lock(&heap_lock);
 
@@ -2732,7 +2719,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
-   i, mfn_x(smfn) + i,
+   i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
 goto out_err;
 }
@@ -2756,6 +2743,38 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 if ( need_tlbflush )
 filtered_flush_tlb_mask(tlbflush_timestamp);
 
+return true;
+
+ out_err:
+while ( i-- )
+pg[i].count_info = PGC_static | PGC_state_free;
+
+spin_unlock(&heap_lock);
+
+return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+unsigned long i;
+struct page_info *pg;
+
+ASSERT(nr_mfns);
+for ( i = 0; i < nr_mfns; i++ )
+if ( !mfn_valid(mfn_add(smfn, i)) )
+return NULL;
+
+pg = mfn_to_page(smfn);
+if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+return NULL;
+
 /*
  * Ensure cache and RAM are consistent for platforms where the guest
  * can control its own visibility of/through the cache.
@@ -2764,14 +2783,6 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
 
 return pg;
-
- out_err:
-while ( i-- )
-pg[i].count_info = PGC_static | PGC_state_free;
-
-spin_unlock(&heap_lock);
-
-return NULL;
 }
 
 /*
-- 
2.25.1




[PATCH v8 9/9] xen: retrieve reserved pages on populate_physmap

2022-07-07 Thread Penny Zheng
When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list to make sure that
guest RAM is still restricted to the statically configured memory regions.
This commit also introduces a new helper, acquire_reserved_page, to make this
work.

Signed-off-by: Penny Zheng 
---
v8 changes:
- As concurrent free/allocate could modify the resv_page_list, we still
need the lock
---
v7 changes:
- remove the lock, since we add the page to rsv_page_list after it has
been totally freed.
---
v6 changes:
- drop the lock before returning
---
v5 changes:
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c | 23 ++
 xen/common/page_alloc.c | 70 +++--
 xen/include/xen/mm.h|  1 +
 3 files changed, 77 insertions(+), 17 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
 
 mfn = _mfn(gpfn);
 }
+else if ( is_domain_using_staticmem(d) )
+{
+/*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+if ( a->extent_order != 0 )
+{
+gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for static 
%pd\n",
+ a->extent_order, d);
+goto out;
+}
+
+mfn = acquire_reserved_page(d, a->memflags);
+if ( mfn_eq(mfn, INVALID_MFN) )
+{
+gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+goto out;
+}
+}
 else
 {
 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 6112f6a3ed..390a9c002d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2702,9 +2702,8 @@ void free_domstatic_page(struct page_info *page)
 put_domain(d);
 }
 
-static bool __init prepare_staticmem_pages(struct page_info *pg,
-   unsigned long nr_mfns,
-   unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
@@ -2785,21 +2784,9 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 return pg;
 }
 
-/*
- * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
- * then assign them to one specific domain #d.
- */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
-   unsigned int nr_mfns, unsigned int memflags)
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+  unsigned int nr_mfns, unsigned int memflags)
 {
-struct page_info *pg;
-
-ASSERT_ALLOC_CONTEXT();
-
-pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
-if ( !pg )
-return -ENOENT;
-
 if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
 {
 /*
@@ -2818,6 +2805,55 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 
 return 0;
 }
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+   unsigned int nr_mfns, unsigned int memflags)
+{
+struct page_info *pg;
+
+ASSERT_ALLOC_CONTEXT();
+
+pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+if ( !pg )
+return -ENOENT;
+
+if 

[PATCH v8 6/9] xen/arm: introduce CDF_staticmem

2022-07-07 Thread Penny Zheng
In order to have an easy and quick way to find out whether a domain's memory
is statically configured, this commit introduces a new flag CDF_staticmem and a
new helper is_domain_using_staticmem() to tell whether it is.

Signed-off-by: Penny Zheng 
---
v8 changes:
- #ifdef-ary around is_domain_using_staticmem() is not needed anymore
---
v7 changes:
- IS_ENABLED(CONFIG_STATIC_MEMORY) would not be needed anymore
---
v6 changes:
- move non-zero is_domain_using_staticmem() from ARM header to common
header
---
v5 changes:
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v4 changes:
- no changes
---
v3 changes:
- change name from "is_domain_static()" to "is_domain_using_staticmem"
---
v2 changes:
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
---
 xen/arch/arm/domain_build.c | 5 -
 xen/include/xen/domain.h| 8 
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 3fd1186b53..b76a84e8f5 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -3287,9 +3287,12 @@ void __init create_domUs(void)
 if ( !dt_device_is_compatible(node, "xen,domain") )
 continue;
 
+if ( dt_find_property(node, "xen,static-mem", NULL) )
+flags |= CDF_staticmem;
+
 if ( dt_property_read_bool(node, "direct-map") )
 {
-if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !dt_find_property(node, "xen,static-mem", NULL) )
+if ( !(flags & CDF_staticmem) )
 panic("direct-map is not valid for domain %s without static 
allocation.\n",
   dt_node_name(node));
 
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 628b14b086..2c8116afba 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -35,6 +35,14 @@ void arch_get_domain_info(const struct domain *d,
 /* Should domain memory be directly mapped? */
 #define CDF_directmap    (1U << 1)
 #endif
+/* Is domain memory on static allocation? */
+#ifdef CONFIG_STATIC_MEMORY
+#define CDF_staticmem    (1U << 2)
+#else
+#define CDF_staticmem    0
+#endif
+
+#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)
 
 /*
  * Arch-specifics.
-- 
2.25.1




[PATCH v8 7/9] xen/arm: unpopulate memory when domain is static

2022-07-07 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, the memory is always
handed back to the heap allocator, which is a problem if the domain is
static.

Pages used as guest RAM for a static domain shall be reserved to this domain
only and never be used for any other purpose, so they shall never go back to
the heap allocator.

This commit puts reserved pages on the new list resv_page_list, only after
having taken them off the "normal" list, when the last ref is dropped.

Signed-off-by: Penny Zheng 
---
v8 changes:
- adapt this patch for newly introduced free_domstatic_page
- order as a parameter is not needed here, as all staticmem operations are
limited to order-0 regions
- move d->page_alloc_lock after operation on d->resv_page_list
---
v7 changes:
- Add page on the rsv_page_list *after* it has been freed
---
v6 changes:
- refine in-code comment
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
---
v5 changes:
- adapt this patch for PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
---
v2 changes:
- put reserved pages on resv_page_list after having taken them off
the "normal" list
---
 xen/common/domain.c |  4 
 xen/common/page_alloc.c | 10 --
 xen/include/xen/mm.h|  6 ++
 xen/include/xen/sched.h |  3 +++
 4 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 875730df50..4043498ffa 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -604,6 +604,10 @@ struct domain *domain_create(domid_t domid,
 INIT_PAGE_LIST_HEAD(&d->page_list);
 INIT_PAGE_LIST_HEAD(&d->extra_page_list);
 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
+#ifdef CONFIG_STATIC_MEMORY
+INIT_PAGE_LIST_HEAD(&d->resv_page_list);
+#endif
+
 
 spin_lock_init(&d->node_affinity_lock);
 d->node_affinity = NODE_MASK_ALL;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 3260490688..b01272a59a 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2681,8 +2681,6 @@ void free_domstatic_page(struct page_info *page)
 need_scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
 
 drop_dom_ref = !domain_adjust_tot_pages(d, -1);
-
-spin_unlock_recursive(&d->page_alloc_lock);
 }
 else
 {
@@ -2692,6 +2690,14 @@ void free_domstatic_page(struct page_info *page)
 
 free_staticmem_pages(page, 1, need_scrub);
 
+if ( likely(d) )
+{
+/* Add page on the resv_page_list *after* it has been freed. */
+if ( !drop_dom_ref )
+put_static_page(d, page);
+spin_unlock_recursive(&d->page_alloc_lock);
+}
+
 if ( drop_dom_ref )
 put_domain(d);
 }
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index f1a7d5c991..07b8a45f1a 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -91,6 +91,12 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 void free_domstatic_page(struct page_info *page);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
 unsigned int memflags);
+#ifdef CONFIG_STATIC_MEMORY
+#define put_static_page(d, page) \
+page_list_add_tail((page), &(d)->resv_page_list)
+#else
+#define put_static_page(d, page) ((void)(d), (void)(page))
+#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 98e8001c89..d4fbd3dea7 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -381,6 +381,9 @@ struct domain
 struct page_list_head page_list;  /* linked list */
 struct page_list_head extra_page_list; /* linked list (size extra_pages) */
 struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
+#ifdef CONFIG_STATIC_MEMORY
+struct page_list_head resv_page_list; /* linked list */
+#endif
 
 /*
  * This field should only be directly accessed by domain_adjust_tot_pages()
-- 
2.25.1




[PATCH v8 5/9] xen: add field "flags" to cover all internal CDF_XXX

2022-07-07 Thread Penny Zheng
With more and more CDF_xxx internal flags being introduced, and to save
space, this commit introduces a new field, "cdf", in struct domain to store
the CDF_* internal flags directly.

Another new CDF_xxx will be introduced in the next patch.

Signed-off-by: Penny Zheng 
Acked-by: Julien Grall 
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- no change
---
v5 changes:
- no change
---
v4 changes:
- no change
---
v3 changes:
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- fix coding style
---
 xen/arch/arm/domain.c | 2 --
 xen/arch/arm/include/asm/domain.h | 3 +--
 xen/common/domain.c   | 3 +++
 xen/include/xen/sched.h   | 3 +++
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 2f8eaab7b5..4722988ee7 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -709,8 +709,6 @@ int arch_domain_create(struct domain *d,
 ioreq_domain_init(d);
 #endif
 
-d->arch.directmap = flags & CDF_directmap;
-
 /* p2m_init relies on some value initialized by the IOMMU subsystem */
 if ( (rc = iommu_domain_init(d, config->iommu_opts)) != 0 )
 goto fail;
diff --git a/xen/arch/arm/include/asm/domain.h 
b/xen/arch/arm/include/asm/domain.h
index ed63c2b6f9..fe7a029ebf 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -29,7 +29,7 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
-#define is_domain_direct_mapped(d) (d)->arch.directmap
+#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
 /*
  * Is the domain using the host memory layout?
@@ -103,7 +103,6 @@ struct arch_domain
 void *tee;
 #endif
 
-bool directmap;
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3b1169d79b..875730df50 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -567,6 +567,9 @@ struct domain *domain_create(domid_t domid,
 /* Sort out our idea of is_system_domain(). */
 d->domain_id = domid;
 
+/* Holding CDF_* internal flags. */
+d->cdf = flags;
+
 /* Debug sanity. */
 ASSERT(is_system_domain(d) ? config == NULL : config != NULL);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b9515eb497..98e8001c89 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -596,6 +596,9 @@ struct domain
 struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
 } ioreq_server;
 #endif
+
+/* Holding CDF_* constant. Internal flags for domain creation. */
+unsigned int cdf;
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.25.1




[PATCH v8 4/9] xen: do not merge reserved pages in free_heap_pages()

2022-07-07 Thread Penny Zheng
The code in free_heap_pages() will try to merge pages with the
successor/predecessor if the pages are suitably aligned. So if the reserved
pages are right next to pages given to the heap allocator, free_heap_pages()
will merge them and, as a result, accidentally hand the reserved pages over
to the heap allocator.

To avoid the above scenario, this commit updates free_heap_pages() to check
whether the predecessor and/or successor has PGC_static set when trying to
merge the about-to-be-freed chunk with the predecessor and/or successor.

Suggested-by: Julien Grall 
Signed-off-by: Penny Zheng 
Reviewed-by: Jan Beulich 
Reviewed-by: Julien Grall 
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- change PGC_reserved to adapt to PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- no changes
---
v2 changes:
- new commit
---
 xen/common/page_alloc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9a80ca10fa..3260490688 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1475,6 +1475,7 @@ static void free_heap_pages(
 /* Merge with predecessor block? */
 if ( !mfn_valid(page_to_mfn(predecessor)) ||
  !page_state_is(predecessor, free) ||
+ (predecessor->count_info & PGC_static) ||
  (PFN_ORDER(predecessor) != order) ||
  (phys_to_nid(page_to_maddr(predecessor)) != node) )
 break;
@@ -1498,6 +1499,7 @@ static void free_heap_pages(
 /* Merge with successor block? */
 if ( !mfn_valid(page_to_mfn(successor)) ||
  !page_state_is(successor, free) ||
+ (successor->count_info & PGC_static) ||
  (PFN_ORDER(successor) != order) ||
  (phys_to_nid(page_to_maddr(successor)) != node) )
 break;
-- 
2.25.1




[PATCH v8 1/9] xen/arm: rename PGC_reserved to PGC_static

2022-07-07 Thread Penny Zheng
PGC_reserved is ambiguous, as the name does not tell what the pages are
reserved for, so this commit renames PGC_reserved to PGC_static, which
clearly indicates that the page is reserved for static memory.

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v8 changes
- no change
---
v7 changes:
- no change
---
v6 changes:
- rename PGC_staticmem to PGC_static
---
v5 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++---
 xen/common/page_alloc.c   | 22 +++---
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index c4bc3cd1e5..8b2481c1f3 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,9 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is reserved */
-#define _PGC_reserved PG_shift(3)
-#define PGC_reserved  PG_mask(1, 3)
+  /* Page is static memory */
+#define _PGC_staticPG_shift(3)
+#define PGC_static PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index fe0e15429a..ed56379b96 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,8 +151,8 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_reserved
-#define PGC_reserved 0
+#ifndef PGC_static
+#define PGC_static 0
 #endif
 
 /*
@@ -2286,7 +2286,7 @@ int assign_pages(
 
 for ( i = 0; i < nr; i++ )
 {
-ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
 if ( pg[i].count_info & PGC_extra )
 extra_pages++;
 }
@@ -2346,7 +2346,7 @@ int assign_pages(
 page_set_owner(&pg[i], d);
 smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
 pg[i].count_info =
-(pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 
1;
+(pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
 
 page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
 }
@@ -2652,8 +2652,8 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_reserved. */
-pg[i].count_info |= PGC_reserved;
+/* In case initializing page of static memory, mark it PGC_static. */
+pg[i].count_info |= PGC_static;
 }
 }
 
@@ -2682,8 +2682,8 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
 for ( i = 0; i < nr_mfns; i++ )
 {
-/* The page should be reserved and not yet allocated. */
-if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+/* The page should be static and not yet allocated. */
+if ( pg[i].count_info != (PGC_state_free | PGC_static) )
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
@@ -2697,10 +2697,10 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 &tlbflush_timestamp);
 
 /*
- * Preserve flag PGC_reserved and change page state
+ * Preserve flag PGC_static and change page state
  * to PGC_state_inuse.
  */
-pg[i].count_info = PGC_reserved | PGC_state_inuse;
+pg[i].count_info = PGC_static | PGC_state_inuse;
 /* Initialise fields which have other uses for free pages. */
 pg[i].u.inuse.type_info = 0;
 page_set_owner(&pg[i], NULL);
@@ -2722,7 +2722,7 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
  out_err:
 while ( i-- )
-pg[i].count_info = PGC_reserved | PGC_state_free;
+pg[i].count_info = PGC_static | PGC_state_free;
 
 spin_unlock(&heap_lock);
 
-- 
2.25.1




[PATCH v8 3/9] xen: update SUPPORT.md for static allocation

2022-07-07 Thread Penny Zheng
SUPPORT.md doesn't explicitly say whether static memory allocation is
supported, so this commit updates SUPPORT.md to list the static allocation
feature as Tech Preview for now.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- use domain instead of sub-systems
---
v5 changes:
- new commit
---
 SUPPORT.md | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/SUPPORT.md b/SUPPORT.md
index 70e98964cb..8e040d1c1e 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -286,6 +286,13 @@ to boot with memory < maxmem.
 
 Status, x86 HVM: Supported
 
+### Static Allocation
+
+Static allocation refers to domains for which memory areas are
+pre-defined by configuration using physical address ranges.
+
+Status, ARM: Tech Preview
+
 ### Memory Sharing
 
 Allow sharing of identical pages between guests
-- 
2.25.1




[PATCH v8 2/9] xen: do not free reserved memory into heap

2022-07-07 Thread Penny Zheng
Pages used as guest RAM for static domain, shall be reserved to this
domain only.
So in case reserved pages being used for other purpose, users
shall not free them back to heap, even when last ref gets dropped.

This commit introduces a new helper free_domstatic_page to free
static page in runtime, and free_staticmem_pages will be called by it
in runtime, so let's drop the __init flag.

Signed-off-by: Penny Zheng 
---
v8 changes:
- introduce new helper free_domstatic_page
- let put_page call free_domstatic_page for static page, when last ref
drops
- #define PGC_static zero when !CONFIG_STATIC_MEMORY in xen/mm.h, as it
is used outside page_alloc.c
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
---
v6 changes:
- adapt to PGC_static
- remove #ifdef aroud function declaration
---
v5 changes:
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- fix the indentation
---
v2 changes:
- new commit
---
---
 xen/arch/arm/include/asm/mm.h |  4 ++-
 xen/arch/arm/mm.c |  2 ++
 xen/common/page_alloc.c   | 51 ++-
 xen/include/xen/mm.h  |  7 +++--
 4 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 8b2481c1f3..f1640bbda4 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,11 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
 #define _PGC_staticPG_shift(3)
 #define PGC_static PG_mask(1, 3)
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 009b8cd9ef..a3bc6d7a24 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1622,6 +1622,8 @@ void put_page(struct page_info *page)
 
 if ( unlikely((nx & PGC_count_mask) == 0) )
 {
+if ( unlikely(nx & PGC_static) )
+free_domstatic_page(page);
 free_domheap_page(page);
 }
 }
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ed56379b96..9a80ca10fa 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,10 +151,6 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_static
-#define PGC_static 0
-#endif
-
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -2636,12 +2632,14 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+  bool need_scrub)
 {
 mfn_t mfn = page_to_mfn(pg);
 unsigned long i;
 
+spin_lock(&heap_lock);
+
 for ( i = 0; i < nr_mfns; i++ )
 {
 mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2652,9 +2650,48 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_static. */
 pg[i].count_info |= PGC_static;
 }
+
+spin_unlock(&heap_lock);
+}
+
+void free_domstatic_page(struct page_info *page)
+{
+struct domain *d = page_get_owner(page);
+bool drop_dom_ref, need_scrub;
+
+ASSERT_ALLOC_CONTEXT();
+
+if ( likely(d) )
+{
+/* NB. May recursively lock from relinquish_memory(). */
+spin_lock_recursive(&d->page_alloc_lock);
+
+arch_free_heap_page(d, page);
+
+/*
+ * Normally we expect a domain to clear pages before freeing them,
+ * if it cares about the secrecy of their contents. However, after
+ * a domain has died we assume responsibility for erasure. We do
+ * scrub regardless if option scrub_domheap is set.
+ */
+need_scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
+
+drop_dom_ref = !domain_adjust_tot_pages(d, -1);
+
+spin_unlock_recursive(&d->page_alloc_lock);

[PATCH v8 0/9] populate/unpopulate memory when domain on static allocation

2022-07-07 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, it always hands the
memory back to the heap allocator. This is a problem for a static domain:
pages used as guest RAM for a static domain shall always be reserved to
that domain only, and not be used for any other purpose, so they shall
never go back to the heap allocator.

This patch series fixes the issue by adding pages to the new list
resv_page_list, after having taken them off the "normal" list, when
unpopulating memory, and by retrieving pages from the reserved page list
(resv_page_list) when populating memory.
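
As a rough sketch (not the actual patch code), the retrieval path could look
like the following, assuming resv_page_list is protected by d->page_alloc_lock
and an assign_domstatic_pages() helper along the lines of the one mentioned in
the changelog below (its name and signature are assumed here):
"
static struct page_info *acquire_reserved_page(struct domain *d,
                                               unsigned int memflags)
{
    struct page_info *page;

    /* Concurrent populate/unpopulate may modify resv_page_list. */
    spin_lock(&d->page_alloc_lock);

    /* Take one previously unpopulated page back off the reserved list. */
    page = page_list_remove_head(&d->resv_page_list);

    spin_unlock(&d->page_alloc_lock);

    if ( !page )
        return NULL;

    /* Helper name and signature assumed here for illustration only. */
    if ( assign_domstatic_pages(d, page, 1, memflags) )
    {
        /* On failure, give the page back to the reserved list. */
        spin_lock(&d->page_alloc_lock);
        page_list_add_tail(page, &d->resv_page_list);
        spin_unlock(&d->page_alloc_lock);
        return NULL;
    }

    return page;
}
"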

---
v8 changes:
- introduce new helper free_domstatic_page
- let put_page call free_domstatic_page for static page, when last ref
drops
- #define PGC_static zero when !CONFIG_STATIC_MEMORY, as it is used
outside page_alloc.c
- #ifdef-ary around is_domain_using_staticmem() is not needed anymore
- order as a parameter is not needed here, as all staticmem operations are
limited to order-0 regions
- move d->page_alloc_lock after operation on d->resv_page_list
- As concurrent free/allocate could modify the resv_page_list, we still
need the lock
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
- IS_ENABLED(CONFIG_STATIC_MEMORY) would not be needed anymore
- add page on the rsv_page_list *after* it has been freed
- remove the lock, since we add the page to rsv_page_list after it has
been totally freed.
---
v6 changes:
- rename PGC_staticmem to PGC_static
- remove #ifdef aroud function declaration
- use domain instead of sub-systems
- move non-zero is_domain_using_staticmem() from ARM header to common
header
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
- drop the lock before returning
---
v5 changes:
- introduce three new commits
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- commit message refinement
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
- change name from "is_domain_static()" to "is_domain_using_staticmem"
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
- move #ifndef is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
- put reserved pages on resv_page_list after having taken them off
the "normal" list
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
- fix coding style

Penny Zheng (9):
  xen/arm: rename PGC_reserved to PGC_static
  xen: do not free reserved memory into heap
  xen: update SUPPORT.md for static allocation
  xen: do not merge reserved pages in free_heap_pages()
  xen: add field "flags" to cover all internal CDF_XXX
  xen/arm: introduce CDF_staticmem
  xen/arm: unpopulate memory when domain is static
  xen: introduce prepare_staticmem_pages
  xen: retrieve reserved pages on populate_physmap

 SUPPORT.md|   7 ++
 xen/arch/arm/domain.c |   2 -
 xen/arch/arm/domain_build.c   |   5 +-
 xen/arch/arm/include/asm/domain.h |   3 +-
 xen/arch/arm/include/asm/mm.h |   8 +-
 xen/arch/arm/mm.c |   2 +
 xen/common/domain.c   |   7 ++
 xen/common/memory.c  

RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest device tree

2022-07-06 Thread Penny Zheng
Hi Stefano and Julien

> -Original Message-
> From: Stefano Stabellini 
> Sent: Thursday, July 7, 2022 7:53 AM
> To: Penny Zheng 
> Cc: Stefano Stabellini ; Julien Grall 
> ;
> xen-devel@lists.xenproject.org; Wei Chen ; Bertrand
> Marquis ; Volodymyr Babchuk
> 
> Subject: RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest
> device tree
> 
> On Mon, 4 Jul 2022, Penny Zheng wrote:
> > Hi Stefano and Julien
> >
> > > -Original Message-
> > > From: Stefano Stabellini 
> > > Sent: Saturday, June 25, 2022 5:57 AM
> > > To: Julien Grall 
> > > Cc: Penny Zheng ;
> > > xen-devel@lists.xenproject.org; Wei Chen ;
> Stefano
> > > Stabellini ; Bertrand Marquis
> > > ; Volodymyr Babchuk
> > > 
> > > Subject: Re: [PATCH v5 7/8] xen/arm: create shared memory nodes in
> > > guest device tree
> > >
> > > On Fri, 24 Jun 2022, Julien Grall wrote:
> > > > On 20/06/2022 06:11, Penny Zheng wrote:
> > > > > We expose the shared memory to the domU using the "xen,shared-
> > > memory-v1"
> > > > > reserved-memory binding. See
> > > > > Documentation/devicetree/bindings/reserved-memory/xen,shared-
> > > memory.
> > > > > txt in Linux for the corresponding device tree binding.
> > > > >
> > > > > To save the cost of re-parsing shared memory device tree
> > > > > configuration when creating shared memory nodes in guest device
> > > > > tree, this commit adds new field "shm_mem" to store shm-info per
> > > > > domain.
> > > > >
> > > > > For each shared memory region, a range is exposed under the
> > > > > /reserved-memory node as a child node. Each range sub-node is
> > > > > named xen-shmem@ and has the following properties:
> > > > > - compatible:
> > > > >  compatible = "xen,shared-memory-v1"
> > > > > - reg:
> > > > >  the base guest physical address and size of the shared
> > > > > memory region
> > > > > - xen,id:
> > > > >  a string that identifies the shared memory region.
> > > > >
> > > > > Signed-off-by: Penny Zheng 
> > > > > Reviewed-by: Stefano Stabellini 
> > > > > ---
> > > > > v5 change:
> > > > > - no change
> > > > > ---
> > > > > v4 change:
> > > > > - no change
> > > > > ---
> > > > > v3 change:
> > > > > - move field "shm_mem" to kernel_info
> > > > > ---
> > > > > v2 change:
> > > > > - using xzalloc
> > > > > - shm_id should be uint8_t
> > > > > - make reg a local variable
> > > > > - add #address-cells and #size-cells properties
> > > > > - fix alignment
> > > > > ---
> > > > >   xen/arch/arm/domain_build.c   | 143
> > > +-
> > > > >   xen/arch/arm/include/asm/kernel.h |   1 +
> > > > >   xen/arch/arm/include/asm/setup.h  |   1 +
> > > > >   3 files changed, 143 insertions(+), 2 deletions(-)
> > > > >
> > > > > diff --git a/xen/arch/arm/domain_build.c
> > > > > b/xen/arch/arm/domain_build.c index 1584e6c2ce..4d62440a0e
> > > > > 100644
> > > > > --- a/xen/arch/arm/domain_build.c
> > > > > +++ b/xen/arch/arm/domain_build.c
> > > > > @@ -900,7 +900,22 @@ static int __init
> > > > > allocate_shared_memory(struct domain *d,
> > > > >   return ret;
> > > > >   }
> > > > >   -static int __init process_shm(struct domain *d,
> > > > > +static int __init append_shm_bank_to_domain(struct kernel_info
> *kinfo,
> > > > > +paddr_t start, paddr_t 
> > > > > size,
> > > > > +u32 shm_id) {
> > > > > +if ( (kinfo->shm_mem.nr_banks + 1) > NR_MEM_BANKS )
> > > > > +return -ENOMEM;
> > > > > +
> > > > > +kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].start = start;
> > > > > +kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].size = size;
> > > > > +kinfo->shm_mem.bank[kinfo->shm_mem

RE: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest device tree

2022-07-04 Thread Penny Zheng
Hi Stefano and Julien

> -Original Message-
> From: Stefano Stabellini 
> Sent: Saturday, June 25, 2022 5:57 AM
> To: Julien Grall 
> Cc: Penny Zheng ; xen-devel@lists.xenproject.org;
> Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 7/8] xen/arm: create shared memory nodes in guest
> device tree
> 
> On Fri, 24 Jun 2022, Julien Grall wrote:
> > On 20/06/2022 06:11, Penny Zheng wrote:
> > > We expose the shared memory to the domU using the "xen,shared-
> memory-v1"
> > > reserved-memory binding. See
> > > Documentation/devicetree/bindings/reserved-memory/xen,shared-
> memory.
> > > txt in Linux for the corresponding device tree binding.
> > >
> > > To save the cost of re-parsing shared memory device tree
> > > configuration when creating shared memory nodes in guest device
> > > tree, this commit adds new field "shm_mem" to store shm-info per
> > > domain.
> > >
> > > For each shared memory region, a range is exposed under the
> > > /reserved-memory node as a child node. Each range sub-node is named
> > > xen-shmem@ and has the following properties:
> > > - compatible:
> > >  compatible = "xen,shared-memory-v1"
> > > - reg:
> > >  the base guest physical address and size of the shared
> > > memory region
> > > - xen,id:
> > >  a string that identifies the shared memory region.
> > >
> > > Signed-off-by: Penny Zheng 
> > > Reviewed-by: Stefano Stabellini 
> > > ---
> > > v5 change:
> > > - no change
> > > ---
> > > v4 change:
> > > - no change
> > > ---
> > > v3 change:
> > > - move field "shm_mem" to kernel_info
> > > ---
> > > v2 change:
> > > - using xzalloc
> > > - shm_id should be uint8_t
> > > - make reg a local variable
> > > - add #address-cells and #size-cells properties
> > > - fix alignment
> > > ---
> > >   xen/arch/arm/domain_build.c   | 143
> +-
> > >   xen/arch/arm/include/asm/kernel.h |   1 +
> > >   xen/arch/arm/include/asm/setup.h  |   1 +
> > >   3 files changed, 143 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/xen/arch/arm/domain_build.c
> > > b/xen/arch/arm/domain_build.c index 1584e6c2ce..4d62440a0e 100644
> > > --- a/xen/arch/arm/domain_build.c
> > > +++ b/xen/arch/arm/domain_build.c
> > > @@ -900,7 +900,22 @@ static int __init allocate_shared_memory(struct
> > > domain *d,
> > >   return ret;
> > >   }
> > >   -static int __init process_shm(struct domain *d,
> > > +static int __init append_shm_bank_to_domain(struct kernel_info *kinfo,
> > > +paddr_t start, paddr_t size,
> > > +u32 shm_id) {
> > > +if ( (kinfo->shm_mem.nr_banks + 1) > NR_MEM_BANKS )
> > > +return -ENOMEM;
> > > +
> > > +kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].start = start;
> > > +kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].size = size;
> > > +kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].shm_id = shm_id;
> > > +kinfo->shm_mem.nr_banks++;
> > > +
> > > +return 0;
> > > +}
> > > +
> > > +static int __init process_shm(struct domain *d, struct kernel_info
> > > +*kinfo,
> > > const struct dt_device_node *node)
> > >   {
> > >   struct dt_device_node *shm_node; @@ -971,6 +986,14 @@ static
> > > int __init process_shm(struct domain *d,
> > >   if ( ret )
> > >   return ret;
> > >   }
> > > +
> > > +/*
> > > + * Record static shared memory region info for later setting
> > > + * up shm-node in guest device tree.
> > > + */
> > > +ret = append_shm_bank_to_domain(kinfo, gbase, psize, shm_id);
> > > +if ( ret )
> > > +return ret;
> > >   }
> > > return 0;
> > > @@ -1301,6 +1324,117 @@ static int __init make_memory_node(const
> > > struct domain *d,
> > >   return res;
> > >   }
> > >   +#ifdef CONFIG_STATIC_SHM
> > > +static int __init make_shm_memory_node(const struct domain *d,
> > &g

RE: [PATCH v5 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-07-04 Thread Penny Zheng
> -Original Message-
> From: Julien Grall 
> Sent: Wednesday, June 29, 2022 6:35 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Wei Liu 
> Subject: Re: [PATCH v5 2/8] xen/arm: allocate static shared memory to the
> default owner dom_io
> 
> 
> 
> On 29/06/2022 08:13, Penny Zheng wrote:
> > Hi Julien
> 
> Hi Penny,
> 

Hi Julien
> >
> >> -Original Message-
> >> From: Julien Grall 
> >> Sent: Saturday, June 25, 2022 2:22 AM
> >> To: Penny Zheng ; xen-
> de...@lists.xenproject.org
> >> Cc: Wei Chen ; Stefano Stabellini
> >> ; Bertrand Marquis
> >> ; Volodymyr Babchuk
> >> ; Andrew Cooper
> >> ; George Dunlap
> >> ; Jan Beulich ; Wei Liu
> >> 
> >> Subject: Re: [PATCH v5 2/8] xen/arm: allocate static shared memory to
> >> the default owner dom_io
> >>
> >> Hi Penny,
> >>
> >> On 20/06/2022 06:11, Penny Zheng wrote:
> >>> From: Penny Zheng 
> >>>
> >>> This commit introduces process_shm to cope with static shared memory
> >>> in domain construction.
> >>>
> >>> DOMID_IO will be the default owner of memory pre-shared among
> >> multiple
> >>> domains at boot time, when no explicit owner is specified.
> >>
> >> The document in patch #1 suggest the page will be shared with
> dom_shared.
> >> But here you say "DOMID_IO".
> >>
> >> Which one is correct?
> >>
> >
> > I’ll fix the documentation, DOM_IO is the last call.
> >
> >>>
> >>> This commit only considers allocating static shared memory to dom_io
> >>> when owner domain is not explicitly defined in device tree, all the
> >>> left, including the "borrower" code path, the "explicit owner" code
> >>> path, shall be introduced later in the following patches.
> >>>
> >>> Signed-off-by: Penny Zheng 
> >>> Reviewed-by: Stefano Stabellini 
> >>> ---
> >>> v5 change:
> >>> - refine in-code comment
> >>> ---
> >>> v4 change:
> >>> - no changes
> >>> ---
> >>> v3 change:
> >>> - refine in-code comment
> >>> ---
> >>> v2 change:
> >>> - instead of introducing a new system domain, reuse the existing
> >>> dom_io
> >>> - make dom_io a non-auto-translated domain, then no need to create
> >>> P2M for it
> >>> - change dom_io definition and make it wider to support static shm
> >>> here too
> >>> - introduce is_shm_allocated_to_domio to check whether static shm is
> >>> allocated yet, instead of using shm_mask bitmap
> >>> - add in-code comment
> >>> ---
> >>>xen/arch/arm/domain_build.c | 132
> >> +++-
> >>>xen/common/domain.c |   3 +
> >>>2 files changed, 134 insertions(+), 1 deletion(-)
> >>>
> >>> diff --git a/xen/arch/arm/domain_build.c
> >>> b/xen/arch/arm/domain_build.c index 7ddd16c26d..91a5ace851 100644
> >>> --- a/xen/arch/arm/domain_build.c
> >>> +++ b/xen/arch/arm/domain_build.c
> >>> @@ -527,6 +527,10 @@ static bool __init
> >> append_static_memory_to_bank(struct domain *d,
> >>>return true;
> >>>}
> >>>
> >>> +/*
> >>> + * If cell is NULL, pbase and psize should hold valid values.
> >>> + * Otherwise, cell will be populated together with pbase and psize.
> >>> + */
> >>>static mfn_t __init acquire_static_memory_bank(struct domain *d,
> >>>   const __be32 **cell,
> >>>   u32 addr_cells,
> >>> u32 size_cells, @@ -535,7 +539,8 @@ static mfn_t __init
> >> acquire_static_memory_bank(struct domain *d,
> >>>mfn_t smfn;
> >>>int res;
> >>>
> >>> -device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
> >>> +if ( cell )
> >>> +device_tree_get_reg(cell, addr_cells, size_cells, pbase,
> >>> + psize);
> >>
> >> I think this is a bit of a hack. To me it sounds like this should be
> >> moved out to

RE: [PATCH v5 1/8] xen/arm: introduce static shared memory

2022-06-29 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 3:26 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> 
> Hi Penny,
> 
> I have looked at the code and I have further questions about the binding.
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > ---
> >   docs/misc/arm/device-tree/booting.txt | 120
> ++
> >   xen/arch/arm/Kconfig  |   6 ++
> >   xen/arch/arm/bootfdt.c|  68 +++
> >   xen/arch/arm/include/asm/setup.h  |   3 +
> >   4 files changed, 197 insertions(+)
> >
> > diff --git a/docs/misc/arm/device-tree/booting.txt
> > b/docs/misc/arm/device-tree/booting.txt
> > index 98253414b8..6467bc5a28 100644
> > --- a/docs/misc/arm/device-tree/booting.txt
> > +++ b/docs/misc/arm/device-tree/booting.txt
> > @@ -378,3 +378,123 @@ device-tree:
> >
> >   This will reserve a 512MB region starting at the host physical address
> >   0x3000 to be exclusively used by DomU1.
> > +
> > +Static Shared Memory
> > +
> > +
> > +The static shared memory device tree nodes allow users to statically
> > +set up shared memory on dom0less system, enabling domains to do
> > +shm-based communication.
> > +
> > +- compatible
> > +
> > +"xen,domain-shared-memory-v1"
> > +
> > +- xen,shm-id
> > +
> > +An 8-bit integer that represents the unique identifier of the shared
> memory
> > +region. The maximum identifier shall be "xen,shm-id = <0xff>".
> 
> There is nothing in Xen that will ensure that xen,shm-id will match for all 
> the
> nodes using the same region.
> 

True, we do not actually use this field; it is added here just to stay aligned
with Linux.
I could add a check at the very beginning, when we parse the device tree.
I'll give more details on where in the code that check would live.
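
A minimal sketch of such a check, assuming the reserved_mem bank recorded for a
shared-memory node also keeps its shm_id (that field name is assumed here, not
taken from the actual parsing code):
"
static int __init check_shm_id(paddr_t pbase, paddr_t psize, u32 shm_id)
{
    unsigned int i;

    for ( i = 0; i < bootinfo.reserved_mem.nr_banks; i++ )
    {
        const struct membank *bank = &bootinfo.reserved_mem.bank[i];

        /* The same host range must always carry the same identifier. */
        if ( bank->start == pbase && bank->size == psize &&
             bank->shm_id != shm_id )
        {
            printk(XENLOG_ERR
                   "shm-id %u does not match %u for %#"PRIpaddr"-%#"PRIpaddr"\n",
                   shm_id, bank->shm_id, pbase, pbase + psize);
            return -EINVAL;
        }
    }

    return 0;
}
"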

> I see you write it to the guest device-tree. However there is a mismatch of 
> the
> type: here you use an integer whereas the guest binding is using a string.
> 
> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v5 1/8] xen/arm: introduce static shared memory

2022-06-29 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 1:55 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> 
> Hi Penny,
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > From: Penny Zheng 
> >
> > This patch serie introduces a new feature: setting up static
> 
> Typo: s/serie/series/
> 
> > shared memory on a dom0less system, through device tree configuration.
> >
> > This commit parses shared memory node at boot-time, and reserve it in
> > bootinfo.reserved_mem to avoid other use.
> >
> > This commits proposes a new Kconfig CONFIG_STATIC_SHM to wrap
> > static-shm-related codes, and this option depends on static memory(
> > CONFIG_STATIC_MEMORY). That's because that later we want to reuse a
> > few helpers, guarded with CONFIG_STATIC_MEMORY, like
> > acquire_staticmem_pages, etc, on static shared memory.
> >
> > Signed-off-by: Penny Zheng 
> > Reviewed-by: Stefano Stabellini 
> > ---
> > v5 change:
> > - no change
> > ---
> > v4 change:
> > - nit fix on doc
> > ---
> > v3 change:
> > - make nr_shm_domain unsigned int
> > ---
> > v2 change:
> > - document refinement
> > - remove bitmap and use the iteration to check
> > - add a new field nr_shm_domain to keep the number of shared domain
> > ---
> >   docs/misc/arm/device-tree/booting.txt | 120
> ++
> >   xen/arch/arm/Kconfig  |   6 ++
> >   xen/arch/arm/bootfdt.c|  68 +++
> >   xen/arch/arm/include/asm/setup.h  |   3 +
> >   4 files changed, 197 insertions(+)
> >
> > diff --git a/docs/misc/arm/device-tree/booting.txt
> > b/docs/misc/arm/device-tree/booting.txt
> > index 98253414b8..6467bc5a28 100644
> > --- a/docs/misc/arm/device-tree/booting.txt
> > +++ b/docs/misc/arm/device-tree/booting.txt
> > @@ -378,3 +378,123 @@ device-tree:
> >
> >   This will reserve a 512MB region starting at the host physical address
> >   0x3000 to be exclusively used by DomU1.
> > +
> > +Static Shared Memory
> > +
> > +
> > +The static shared memory device tree nodes allow users to statically
> > +set up shared memory on dom0less system, enabling domains to do
> > +shm-based communication.
> > +
> > +- compatible
> > +
> > +"xen,domain-shared-memory-v1"
> > +
> > +- xen,shm-id
> > +
> > +An 8-bit integer that represents the unique identifier of the shared
> memory
> > +region. The maximum identifier shall be "xen,shm-id = <0xff>".
> > +
> > +- xen,shared-mem
> > +
> > +An array takes a physical address, which is the base address of the
> > +shared memory region in host physical address space, a size, and a
> guest
> > +physical address, as the target address of the mapping. The number of
> cells
> > +for the host address (and size) is the same as the guest 
> > pseudo-physical
> > +address and they are inherited from the parent node.
> 
> Sorry for jump in the discussion late. But as this is going to be a stable 
> ABI, I
> would to make sure the interface is going to be easily extendable.
> 
> AFAIU, with your proposal the host physical address is mandatory. I would
> expect that some user may want to share memory but don't care about the
> exact location in memory. So I think it would be good to make it optional in
> the binding.
> 
> I think this wants to be done now because it would be difficult to change the
> binding afterwards (the host physical address is the first set of cells).
> 
> The Xen doesn't need to handle the optional case.
> 
> [...]
> 
> > diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig index
> > be9eff0141..7321f47c0f 100644
> > --- a/xen/arch/arm/Kconfig
> > +++ b/xen/arch/arm/Kconfig
> > @@ -139,6 +139,12 @@ config TEE
> >
> >   source "arch/arm/tee/Kconfig"
> >
> > +config STATIC_SHM
> > +   bool "Statically shared memory on a dom0less system" if
> UNSUPPORTED
> 
> You also want to update SUPPORT.md.
> 
> > +   depends on STATIC_MEMORY
> > +   help
> > + This option enables statically shared memory on a dom0less system.
> > +
> >   endmenu
> >
> >   menu "ARM errata workaround via the alternative framework"
>

RE: [PATCH v5 5/8] xen/arm: Add additional reference to owner domain when the owner is allocated

2022-06-29 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 3:18 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 5/8] xen/arm: Add additional reference to owner
> domain when the owner is allocated
> 
> Hi Penny,
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > Borrower domain will fail to get a page ref using the owner domain
> > during allocation, when the owner is created after borrower.
> >
> > So here, we decide to get and add the right amount of reference, which
> > is the number of borrowers, when the owner is allocated.
> >
> > Signed-off-by: Penny Zheng 
> > Reviewed-by: Stefano Stabellini 
> > ---
> > v5 change:
> > - no change
> > ---
> > v4 changes:
> > - no change
> > ---
> > v3 change:
> > - printk rather than dprintk since it is a serious error
> > ---
> > v2 change:
> > - new commit
> > ---
> >   xen/arch/arm/domain_build.c | 62
> +
> >   1 file changed, 62 insertions(+)
> >
> > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > index d4fd64e2bd..650d18f5ef 100644
> > --- a/xen/arch/arm/domain_build.c
> > +++ b/xen/arch/arm/domain_build.c
> > @@ -799,6 +799,34 @@ static mfn_t __init
> > acquire_shared_memory_bank(struct domain *d,
> >
> >   }
> >
> > +static int __init acquire_nr_borrower_domain(struct domain *d,
> > + paddr_t pbase, paddr_t psize,
> > + unsigned long
> > +*nr_borrowers) {
> > +unsigned long bank;
> > +
> > +/* Iterate reserved memory to find requested shm bank. */
> > +for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
> > +{
> > +paddr_t bank_start = bootinfo.reserved_mem.bank[bank].start;
> > +paddr_t bank_size = bootinfo.reserved_mem.bank[bank].size;
> > +
> > +if ( pbase == bank_start && psize == bank_size )
> > +break;
> > +}
> > +
> > +if ( bank == bootinfo.reserved_mem.nr_banks )
> > +return -ENOENT;
> > +
> > +if ( d == dom_io )
> > +*nr_borrowers =
> bootinfo.reserved_mem.bank[bank].nr_shm_domain;
> > +else
> > +/* Exclude the owner domain itself. */
> NIT: I think this comment wants to be just above the 'if' and expanded to
> explain why the "dom_io" is not included. AFAIU, this is because "dom_io" is
> not described in the Device-Tree, so it would not be taken into account for
> nr_shm_domain.
> 
> > +*nr_borrowers =
> > + bootinfo.reserved_mem.bank[bank].nr_shm_domain - 1;
> 
> TBH, given the use here. I would have consider to not increment
> nr_shm_domain if the role was owner in parsing code. This is v5 now, so I
> would be OK with the comment above.
> 
> But I would suggest to consider it as a follow-up.
> 

LTM, it is not a big change; I'll try to include it in the next series~
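
A minimal sketch of that follow-up, assuming the binding's role values are
"owner" and "borrower" and that this helper runs while parsing each
shared-memory node that references the bank:
"
static void __init count_shm_borrower(struct membank *bank,
                                      const struct dt_device_node *node)
{
    const char *role_str;

    /*
     * "role" is optional; only an explicit owner is excluded, every
     * other domain referencing this bank is a borrower.
     */
    if ( dt_property_read_string(node, "role", &role_str) == 0 &&
         !strcmp(role_str, "owner") )
        return;

    bank->nr_shm_domain++;
}
"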

> > +
> > +return 0;
> > +}
> > +
> >   /*
> >* Func allocate_shared_memory is supposed to be only called
> >* from the owner.
> > @@ -810,6 +838,8 @@ static int __init allocate_shared_memory(struct
> domain *d,
> >   {
> >   mfn_t smfn;
> >   int ret = 0;
> > +unsigned long nr_pages, nr_borrowers, i;
> > +struct page_info *page;
> >
> >   dprintk(XENLOG_INFO,
> >   "Allocate static shared memory BANK
> > %#"PRIpaddr"-%#"PRIpaddr".\n", @@ -824,6 +854,7 @@ static int __init
> allocate_shared_memory(struct domain *d,
> >* DOMID_IO is the domain, like DOMID_XEN, that is not auto-
> translated.
> >* It sees RAM 1:1 and we do not need to create P2M mapping for it
> >*/
> > +nr_pages = PFN_DOWN(psize);
> >   if ( d != dom_io )
> >   {
> >   ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn,
> > PFN_DOWN(psize)); @@ -835,6 +866,37 @@ static int __init
> allocate_shared_memory(struct domain *d,
> >   }
> >   }
> >
> > +/*
> > + * Get the right amount of references per page, which is the number of
> > + * borrow domains.
> > + */
> > +ret = acquire_nr_borrower_domain(d, pbase, psize, &nr_borrowers);
> > +if ( ret )
> > +retur

RE: [PATCH v5 3/8] xen/arm: allocate static shared memory to a specific owner domain

2022-06-29 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 3:07 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 3/8] xen/arm: allocate static shared memory to a
> specific owner domain
> 
> Hi Penny,
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > If owner property is defined, then owner domain of a static shared
> > memory region is not the default dom_io anymore, but a specific domain.
> >
> > This commit implements allocating static shared memory to a specific
> > domain when owner property is defined.
> >
> > Coding flow for dealing borrower domain will be introduced later in
> > the following commits.
> >
> > Signed-off-by: Penny Zheng 
> > Reviewed-by: Stefano Stabellini 
> > ---
> > v5 change:
> > - no change
> > ---
> > v4 change:
> > - no changes
> > ---
> > v3 change:
> > - simplify the code since o_gbase is not used if the domain is dom_io
> > ---
> > v2 change:
> > - P2M mapping is restricted to normal domain
> > - in-code comment fix
> > ---
> >   xen/arch/arm/domain_build.c | 44 +++
> --
> >   1 file changed, 33 insertions(+), 11 deletions(-)
> >
> > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > index 91a5ace851..d4fd64e2bd 100644
> > --- a/xen/arch/arm/domain_build.c
> > +++ b/xen/arch/arm/domain_build.c
> > @@ -805,9 +805,11 @@ static mfn_t __init
> acquire_shared_memory_bank(struct domain *d,
> >*/
> >   static int __init allocate_shared_memory(struct domain *d,
> >u32 addr_cells, u32 size_cells,
> > - paddr_t pbase, paddr_t psize)
> > + paddr_t pbase, paddr_t psize,
> > + paddr_t gbase)
> >   {
> >   mfn_t smfn;
> > +int ret = 0;
> >
> >   dprintk(XENLOG_INFO,
> >   "Allocate static shared memory BANK
> > %#"PRIpaddr"-%#"PRIpaddr".\n", @@ -822,8 +824,18 @@ static int __init
> allocate_shared_memory(struct domain *d,
> >* DOMID_IO is the domain, like DOMID_XEN, that is not auto-
> translated.
> >* It sees RAM 1:1 and we do not need to create P2M mapping for it
> >*/
> > -ASSERT(d == dom_io);
> > -return 0;
> > +if ( d != dom_io )
> > +{
> > +ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn,
> > + PFN_DOWN(psize));
> 
> Coding style: this line is over 80 characters. And...
> 
> > +if ( ret )
> > +{
> > +printk(XENLOG_ERR
> > +   "Failed to map shared memory to %pd.\n", d);
> 
> ... this line could be merged with the previous one.
> 
> > +return ret;
> > +}
> > +}
> > +
> > +return ret;
> >   }
> >
> >   static int __init process_shm(struct domain *d, @@ -836,6 +848,8 @@
> > static int __init process_shm(struct domain *d,
> >   u32 shm_id;
> >   u32 addr_cells, size_cells;
> >   paddr_t gbase, pbase, psize;
> > +const char *role_str;
> > +bool owner_dom_io = true;
> 
> I think it would be best if role_str and owner_dom_io are defined within the
> loop. Same goes for all the other declarations.
> 
> >
> >   dt_for_each_child_node(node, shm_node)
> >   {
> > @@ -862,19 +876,27 @@ static int __init process_shm(struct domain *d,
> >   ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize,
> PAGE_SIZE));
> >   gbase = dt_read_number(cells, addr_cells);
> >
> > -/* TODO: Consider owner domain is not the default dom_io. */
> > +/*
> > + * "role" property is optional and if it is defined explicitly,
> > + * then the owner domain is not the default "dom_io" domain.
> > + */
> > +if ( dt_property_read_string(shm_node, "role", &role_str) == 0 )
> > +owner_dom_io = false
> IIUC, the role is per-region. However, owner_dom_io is first initialized to
> false outside the loop. Therefore, the variable may not be correct on the next
> region.
> 
> So I think you want to write:
> 
> owner_dom_io = !dt_property_read_string(...);
> 
> This can also be avoided i

RE: [PATCH v5 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-06-29 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 2:22 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Wei Liu 
> Subject: Re: [PATCH v5 2/8] xen/arm: allocate static shared memory to the
> default owner dom_io
> 
> Hi Penny,
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > From: Penny Zheng 
> >
> > This commit introduces process_shm to cope with static shared memory
> > in domain construction.
> >
> > DOMID_IO will be the default owner of memory pre-shared among
> multiple
> > domains at boot time, when no explicit owner is specified.
> 
> The document in patch #1 suggest the page will be shared with dom_shared.
> But here you say "DOMID_IO".
> 
> Which one is correct?
> 

I’ll fix the documentation, DOM_IO is the last call.

> >
> > This commit only considers allocating static shared memory to dom_io
> > when owner domain is not explicitly defined in device tree, all the
> > left, including the "borrower" code path, the "explicit owner" code
> > path, shall be introduced later in the following patches.
> >
> > Signed-off-by: Penny Zheng 
> > Reviewed-by: Stefano Stabellini 
> > ---
> > v5 change:
> > - refine in-code comment
> > ---
> > v4 change:
> > - no changes
> > ---
> > v3 change:
> > - refine in-code comment
> > ---
> > v2 change:
> > - instead of introducing a new system domain, reuse the existing
> > dom_io
> > - make dom_io a non-auto-translated domain, then no need to create P2M
> > for it
> > - change dom_io definition and make it wider to support static shm
> > here too
> > - introduce is_shm_allocated_to_domio to check whether static shm is
> > allocated yet, instead of using shm_mask bitmap
> > - add in-code comment
> > ---
> >   xen/arch/arm/domain_build.c | 132
> +++-
> >   xen/common/domain.c |   3 +
> >   2 files changed, 134 insertions(+), 1 deletion(-)
> >
> > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> > index 7ddd16c26d..91a5ace851 100644
> > --- a/xen/arch/arm/domain_build.c
> > +++ b/xen/arch/arm/domain_build.c
> > @@ -527,6 +527,10 @@ static bool __init
> append_static_memory_to_bank(struct domain *d,
> >   return true;
> >   }
> >
> > +/*
> > + * If cell is NULL, pbase and psize should hold valid values.
> > + * Otherwise, cell will be populated together with pbase and psize.
> > + */
> >   static mfn_t __init acquire_static_memory_bank(struct domain *d,
> >  const __be32 **cell,
> >  u32 addr_cells, u32
> > size_cells, @@ -535,7 +539,8 @@ static mfn_t __init
> acquire_static_memory_bank(struct domain *d,
> >   mfn_t smfn;
> >   int res;
> >
> > -device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
> > +if ( cell )
> > +device_tree_get_reg(cell, addr_cells, size_cells, pbase,
> > + psize);
> 
> I think this is a bit of a hack. To me it sounds like this should be moved 
> out to
> a separate helper. This will also make the interface of
> acquire_shared_memory_bank() less questionable (see below).
> 

Ok, I'll try not to reuse acquire_static_memory_bank in
acquire_shared_memory_bank.
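
A minimal sketch of what the reworked interface could look like, with
device_tree_get_reg() kept in the caller and the shared-memory path taking
pbase/psize directly (error handling trimmed; acquire_domstatic_pages() is
used here purely for illustration):
"
static mfn_t __init acquire_shared_memory_bank(struct domain *d,
                                               paddr_t pbase, paddr_t psize)
{
    mfn_t smfn = maddr_to_mfn(pbase);
    unsigned int nr_mfns = PFN_DOWN(psize);
    int res;

    ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize, PAGE_SIZE));

    /* The PFN_DOWN(psize) > UINT_MAX check is elided in this sketch. */
    res = acquire_domstatic_pages(d, smfn, nr_mfns, 0);
    if ( res )
    {
        printk(XENLOG_ERR
               "%pd: failed to acquire shared memory bank: %d\n", d, res);
        return INVALID_MFN;
    }

    return smfn;
}
"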

> As this is v5, I would be OK with a follow-up for this split. But this 
> interface of
> acuiqre_shared_memory_bank() needs to change.
> 

I'll try to fix it in the next version.

> >   ASSERT(IS_ALIGNED(*pbase, PAGE_SIZE) && IS_ALIGNED(*psize,
> > PAGE_SIZE));
> 
> In the context of your series, who is checking that both psize and pbase are
> suitably aligned?
> 

Actually, the whole parsing process is redundant for the static shared memory.
I've already parsed it and checked it before in process_shm.

> >   if ( PFN_DOWN(*psize) > UINT_MAX )
> >   {
> > @@ -759,6 +764,125 @@ static void __init assign_static_memory_11(struct
> domain *d,
> >   panic("Failed to assign requested static memory for direct-map
> domain %pd.",
> > d);
> >   }
> > +
> > +#ifdef CONFIG_STATIC_SHM
> > +/*
> > + * This function checks whether the static shared memory region is
> > + * already allocated to dom_io.
> > + */
> > +static bool __init is_shm_allocated_to_domio(p

RE: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is static

2022-06-28 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Wednesday, June 29, 2022 1:56 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Stefano Stabellini ; Wei Liu ; xen-
> de...@lists.xenproject.org; Julien Grall 
> Subject: Re: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is
> static
> 
> On 29.06.2022 05:12, Penny Zheng wrote:
> > Hi Julien and Jan
> >
> >> -Original Message-
> >> From: Julien Grall 
> >> Sent: Monday, June 27, 2022 6:19 PM
> >> To: Penny Zheng ; Jan Beulich
> >> 
> >> Cc: Wei Chen ; Andrew Cooper
> >> ; George Dunlap
> >> ; Stefano Stabellini
> >> ; Wei Liu ; xen-
> >> de...@lists.xenproject.org
> >> Subject: Re: [PATCH v7 7/9] xen/arm: unpopulate memory when domain
> is
> >> static
> >>
> >>
> >>
> >> On 27/06/2022 11:03, Penny Zheng wrote:
> >>> Hi jan
> >>>
> >>>> -Original Message-
> >>> put_static_pages, that is, adding pages to the reserved list, is
> >>> only for freeing static pages on runtime. In static page
> >>> initialization stage, I also use free_statimem_pages, and in which
> >>> stage, I think the domain has not been constructed at all. So I
> >>> prefer the freeing of staticmem pages is split into two parts:
> >>> free_staticmem_pages and put_static_pages
> >>
> >> AFAIU, all the pages would have to be allocated via
> >> acquire_domstatic_pages(). This call requires the domain to be
> >> allocated and setup for handling memory.
> >>
> >> Therefore, I think the split is unnecessary. This would also have the
> >> advantage to remove one loop. Admittly, this is not important when
> >> the order 0, but it would become a problem for larger order (you may
> >> have to pull the struct page_info multiple time in the cache).
> >>
> >
> > How about this:
> > I create a new func free_domstatic_page, and it will be like:
> > "
> > static void free_domstatic_page(struct domain *d, struct page_info
> > *page) {
> > unsigned int i;
> > bool need_scrub;
> >
> > /* NB. May recursively lock from relinquish_memory(). */
> > spin_lock_recursive(&d->page_alloc_lock);
> >
> > arch_free_heap_page(d, page);
> >
> > /*
> >  * Normally we expect a domain to clear pages before freeing them,
> >  * if it cares about the secrecy of their contents. However, after
> >  * a domain has died we assume responsibility for erasure. We do
> >  * scrub regardless if option scrub_domheap is set.
> >  */
> > need_scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
> >
> > free_staticmem_pages(page, 1, need_scrub);
> >
> > /* Add page on the resv_page_list *after* it has been freed. */
> > put_static_page(d, page);
> >
> > drop_dom_ref = !domain_adjust_tot_pages(d, -1);
> >
> > spin_unlock_recursive(&d->page_alloc_lock);
> >
> > if ( drop_dom_ref )
> > put_domain(d);
> > }
> > "
> >
> > In free_domheap_pages, we just call free_domstatic_page:
> >
> > "
> > @@ -2430,6 +2430,9 @@ void free_domheap_pages(struct page_info *pg,
> > unsigned int order)
> >
> >  ASSERT_ALLOC_CONTEXT();
> >
> > +if ( unlikely(pg->count_info & PGC_static) )
> > +return free_domstatic_page(d, pg);
> > +
> >  if ( unlikely(is_xen_heap_page(pg)) )
> >  {
> >  /* NB. May recursively lock from relinquish_memory(). */ @@
> > -2673,6 +2676,38 @@ void free_staticmem_pages(struct page_info *pg,
> > unsigned long nr_mfns, "
> >
> > Then the split could be avoided and we could save the loop as much as
> possible.
> > Any suggestion?
> 
> Looks reasonable at the first glance (will need to see it in proper context 
> for a
> final opinion), provided e.g. Xen heap pages can never be static.

If you don't prefer letting free_domheap_pages call free_domstatic_page, then
maybe the check should happen in put_page:
"
@@ -1622,6 +1622,8 @@ void put_page(struct page_info *page)

 if ( unlikely((nx & PGC_count_mask) == 0) )
 {
+if ( unlikely(page->count_info & PGC_static) )
+free_domstatic_page(page);
 free_domheap_page(page);
 }
 }
"
Wdyt now?
 
> 
> Jan


RE: [PATCH v5 1/8] xen/arm: introduce static shared memory

2022-06-28 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 1:55 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v5 1/8] xen/arm: introduce static shared memory
> 
> Hi Penny,
> 
> On 20/06/2022 06:11, Penny Zheng wrote:
> > From: Penny Zheng 
> >
> > This patch serie introduces a new feature: setting up static
> 
> Typo: s/serie/series/
> 
> > shared memory on a dom0less system, through device tree configuration.
> >
> > This commit parses shared memory node at boot-time, and reserve it in
> > bootinfo.reserved_mem to avoid other use.
> >
> > This commits proposes a new Kconfig CONFIG_STATIC_SHM to wrap
> > static-shm-related codes, and this option depends on static memory(
> > CONFIG_STATIC_MEMORY). That's because that later we want to reuse a
> > few helpers, guarded with CONFIG_STATIC_MEMORY, like
> > acquire_staticmem_pages, etc, on static shared memory.
> >
> > Signed-off-by: Penny Zheng 
> > Reviewed-by: Stefano Stabellini 
> > ---
> > v5 change:
> > - no change
> > ---
> > v4 change:
> > - nit fix on doc
> > ---
> > v3 change:
> > - make nr_shm_domain unsigned int
> > ---
> > v2 change:
> > - document refinement
> > - remove bitmap and use the iteration to check
> > - add a new field nr_shm_domain to keep the number of shared domain
> > ---
> >   docs/misc/arm/device-tree/booting.txt | 120
> ++
> >   xen/arch/arm/Kconfig  |   6 ++
> >   xen/arch/arm/bootfdt.c|  68 +++
> >   xen/arch/arm/include/asm/setup.h  |   3 +
> >   4 files changed, 197 insertions(+)
> >
> > diff --git a/docs/misc/arm/device-tree/booting.txt
> > b/docs/misc/arm/device-tree/booting.txt
> > index 98253414b8..6467bc5a28 100644
> > --- a/docs/misc/arm/device-tree/booting.txt
> > +++ b/docs/misc/arm/device-tree/booting.txt
> > @@ -378,3 +378,123 @@ device-tree:
> >
> >   This will reserve a 512MB region starting at the host physical address
> >   0x3000 to be exclusively used by DomU1.
> > +
> > +Static Shared Memory
> > +
> > +
> > +The static shared memory device tree nodes allow users to statically
> > +set up shared memory on dom0less system, enabling domains to do
> > +shm-based communication.
> > +
> > +- compatible
> > +
> > +"xen,domain-shared-memory-v1"
> > +
> > +- xen,shm-id
> > +
> > +An 8-bit integer that represents the unique identifier of the shared
> memory
> > +region. The maximum identifier shall be "xen,shm-id = <0xff>".
> > +
> > +- xen,shared-mem
> > +
> > +An array takes a physical address, which is the base address of the
> > +shared memory region in host physical address space, a size, and a
> guest
> > +physical address, as the target address of the mapping. The number of
> cells
> > +for the host address (and size) is the same as the guest 
> > pseudo-physical
> > +address and they are inherited from the parent node.
> 
> Sorry for jump in the discussion late. But as this is going to be a stable 
> ABI, I
> would to make sure the interface is going to be easily extendable.
> 
> AFAIU, with your proposal the host physical address is mandatory. I would
> expect that some user may want to share memory but don't care about the
> exact location in memory. So I think it would be good to make it optional in
> the binding.
> 
> I think this wants to be done now because it would be difficult to change the
> binding afterwards (the host physical address is the first set of cells).
> 
> The Xen doesn't need to handle the optional case.
> 

Sure, I'll make "the host physical address" optional here in the binding, but
for now without an actual code implementation; I'll fill that in later when I
have some free time~

The use case you mentioned here is that we let Xen allocate an arbitrary
static shared memory region, so size and guest physical address are still
mandatory, right?
 
> [...]
> 
> > diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig index
> > be9eff0141..7321f47c0f 100644
> > --- a/xen/arch/arm/Kconfig
> > +++ b/xen/arch/arm/Kconfig
> > @@ -139,6 +139,12 @@ config TEE
> >
> >   source "arch/arm/tee/Kconfig"
> >
> > +config STATIC_SHM
> > +   bool "Statically shared memory on a dom0less system"

RE: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is static

2022-06-28 Thread Penny Zheng
Hi Julien and Jan

> -Original Message-
> From: Julien Grall 
> Sent: Monday, June 27, 2022 6:19 PM
> To: Penny Zheng ; Jan Beulich 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Stefano Stabellini ; Wei Liu ; xen-
> de...@lists.xenproject.org
> Subject: Re: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is
> static
> 
> 
> 
> On 27/06/2022 11:03, Penny Zheng wrote:
> > Hi jan
> >
> >> -Original Message-
> > put_static_pages, that is, adding pages to the reserved list, is only
> > used when freeing static pages at runtime. In the static page
> > initialization stage, I also use free_staticmem_pages, and at that stage
> > the domain has not been constructed at all. So I prefer that the freeing
> > of staticmem pages stays split into two parts: free_staticmem_pages and
> > put_static_pages
> 
> AFAIU, all the pages would have to be allocated via
> acquire_domstatic_pages(). This call requires the domain to be allocated and
> setup for handling memory.
> 
> Therefore, I think the split is unnecessary. This would also have the
> advantage of removing one loop. Admittedly, this is not important for
> order 0, but it would become a problem for larger orders (you may have to
> pull the struct page_info into the cache multiple times).
> 

How about this:
I create a new function free_domstatic_page, and it would look like:
"
static void free_domstatic_page(struct domain *d, struct page_info *page)
{
    bool need_scrub, drop_dom_ref;

    /* NB. May recursively lock from relinquish_memory(). */
    spin_lock_recursive(&d->page_alloc_lock);

    arch_free_heap_page(d, page);

    /*
     * Normally we expect a domain to clear pages before freeing them,
     * if it cares about the secrecy of their contents. However, after
     * a domain has died we assume responsibility for erasure. We do
     * scrub regardless if option scrub_domheap is set.
     */
    need_scrub = d->is_dying || scrub_debug || opt_scrub_domheap;

    free_staticmem_pages(page, 1, need_scrub);

    /* Add page on the resv_page_list *after* it has been freed. */
    put_static_page(d, page);

    drop_dom_ref = !domain_adjust_tot_pages(d, -1);

    spin_unlock_recursive(&d->page_alloc_lock);

    if ( drop_dom_ref )
        put_domain(d);
}
"

In free_domheap_pages, we just call free_domstatic_page:

"
@@ -2430,6 +2430,9 @@ void free_domheap_pages(struct page_info *pg, unsigned 
int order)

 ASSERT_ALLOC_CONTEXT();

+if ( unlikely(pg->count_info & PGC_static) )
+return free_domstatic_page(d, pg);
+
 if ( unlikely(is_xen_heap_page(pg)) )
 {
 /* NB. May recursively lock from relinquish_memory(). */
@@ -2673,6 +2676,38 @@ void free_staticmem_pages(struct page_info *pg, unsigned 
long nr_mfns,
"

Then the split could be avoided, and we would save the extra loop as much as
possible. Any suggestions?

> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v7 9/9] xen: retrieve reserved pages on populate_physmap

2022-06-27 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Saturday, June 25, 2022 3:51 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Stefano Stabellini ;
> Wei Liu 
> Subject: Re: [PATCH v7 9/9] xen: retrieve reserved pages on
> populate_physmap
> 
> Hi Penny,
> 
> On 20/06/2022 03:44, Penny Zheng wrote:
> > When a static domain populates memory through populate_physmap at
> > runtime, it shall retrieve reserved pages from resv_page_list to make
> > sure that guest RAM is still restricted in statically configured memory
> regions.
> > This commit also introduces a new helper acquire_reserved_page to make
> it work.
> >
> > Signed-off-by: Penny Zheng 
> > ---
> > v7 changes:
> > - remove the lock, since we add the page to rsv_page_list after it has
> > been totally freed.
> 
> Hmmm... Adding the page after it has been totally freed doesn't mean you
> can get away with the lock. AFAICT you can still have concurrent free/allocate
> that could modify the list.
> 
> Therefore if you add/remove pages without the list, you would end up to
> corrupt the list.
> 
> If you disagree, then please point out which lock (or mechanism) will prevent
> concurrent access.
> 

Ok. Combined with the comments on the last series, you are suggesting that we
need to add two locks, right?

One is the lock for concurrent free/allocation of page_info, for which we will
use heap_lock: one use stays in free_staticmem_pages, and one in its reverse
function prepare_staticmem_pages.

The other is for concurrent free/allocation on resv_page_list, and we will use
d->page_alloc_lock to guard it: one use stays in put_static_page, and another
in its reverse function acquire_reserved_page.
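
Roughly, the resv_page_list side would pair up like the following sketch
(purely illustrative, not the actual patch: the helper name is made up, and
the real acquire_reserved_page also has to prepare and assign the page to
the domain):

"
static struct page_info *take_reserved_page(struct domain *d)
{
    struct page_info *page;

    spin_lock(&d->page_alloc_lock);
    /* Reverse of put_static_page(): remove one page from resv_page_list. */
    page = page_list_remove_head(&d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);

    return page;
}
"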

> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is static

2022-06-27 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Wednesday, June 22, 2022 5:24 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Julien Grall ; Stefano Stabellini ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v7 7/9] xen/arm: unpopulate memory when domain is
> static
> 
> On 20.06.2022 04:44, Penny Zheng wrote:
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -2498,6 +2498,10 @@ void free_domheap_pages(struct page_info *pg,
> unsigned int order)
> >  }
> >
> >  free_heap_pages(pg, order, scrub);
> > +
> > +/* Add page on the resv_page_list *after* it has been freed. */
> > +if ( unlikely(pg->count_info & PGC_static) )
> > +put_static_pages(d, pg, order);
> 
> Unless I'm overlooking something the list addition done there / ...
> 
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -90,6 +90,15 @@ void free_staticmem_pages(struct page_info *pg,
> unsigned long nr_mfns,
> >bool need_scrub);  int
> > acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int
> nr_mfns,
> >  unsigned int memflags);
> > +#ifdef CONFIG_STATIC_MEMORY
> > +#define put_static_pages(d, page, order) ({ \
> > +unsigned int i; \
> > +for ( i = 0; i < (1 << (order)); i++ )  \
> > +page_list_add_tail((pg) + i, &(d)->resv_page_list); \
> > +})
> 
> ... here isn't guarded by any lock. Feels like we've been there before.
> It's not really clear to me why the freeing of staticmem pages needs to be
> split like this - if it wasn't split, the list addition would "naturally" 
> occur with
> the lock held, I think.

As you and Julien reminded me, I need to add a lock for operations
(free/allocation) on resv_page_list. I'll guard put_static_pages with
d->page_alloc_lock, and bring back the lock in acquire_reserved_page.

put_static_pages, that is, adding pages to the reserved list, is only
used when freeing static pages at runtime. In the static page
initialization stage, I also use free_staticmem_pages, and at that stage
the domain has not been constructed at all. So I prefer that the freeing
of staticmem pages stays split into two parts: free_staticmem_pages and
put_static_pages

> 
> Furthermore, be careful with the local variable name used here. Consider what
> would happen with an invocation of
> 
> put_static_pages(d, page, i);
> 
> The common approach is to suffix an underscore to the variable name.
> Such names are not supposed to be used outside of macro definitions, and
> hence there's then no potential for such a conflict.
> 

Understood!! I will change "unsigned int i" to "unsigned int _i";

> Finally I think you mean (1u << (order)) to be on the safe side against UB if
> order could ever reach 31. Then again - is "order" as a parameter needed
> here in the first place? Wasn't it that staticmem operations are limited to
> order-0 regions?

Yes, right now the actual usage is limited to order-0. How about I add an
assertion here and remove the order parameter:

/* Add page on the resv_page_list *after* it has been freed. */
if ( unlikely(pg->count_info & PGC_static) )
{
    ASSERT(!order);
    put_static_pages(d, pg);
}

> Jan


[PATCH v5 8/8] xen/arm: enable statically shared memory on Dom0

2022-06-19 Thread Penny Zheng
From: Penny Zheng 

To add statically shared memory nodes to Dom0, the user shall put the
corresponding static shared memory configuration under the /chosen node.

This commit adds the shm-processing function process_shm to construct_dom0
to enable statically shared memory on Dom0.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 change:
- no change
---
v3 change:
- no change
---
v2 change:
- no change
---
 xen/arch/arm/domain_build.c | 14 ++
 1 file changed, 14 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 4d62440a0e..b57c60f411 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -2658,6 +2658,11 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo,
 if ( res )
 return res;
 }
+
+res = make_resv_memory_node(d, kinfo->fdt, addrcells, sizecells,
+&kinfo->shm_mem);
+if ( res )
+return res;
 }
 
 res = fdt_end_node(kinfo->fdt);
@@ -3730,6 +3735,9 @@ static int __init construct_dom0(struct domain *d)
 {
 struct kernel_info kinfo = {};
 int rc;
+#ifdef CONFIG_STATIC_SHM
+const struct dt_device_node *chosen = dt_find_node_by_path("/chosen");
+#endif
 
 /* Sanity! */
 BUG_ON(d->domain_id != 0);
@@ -3764,6 +3772,12 @@ static int __init construct_dom0(struct domain *d)
 allocate_memory_11(d, &kinfo);
 find_gnttab_region(d, &kinfo);
 
+#ifdef CONFIG_STATIC_SHM
+rc = process_shm(d, &kinfo, chosen);
+if ( rc < 0 )
+return rc;
+#endif
+
 /* Map extra GIC MMIO, irqs and other hw stuffs to dom0. */
 rc = gic_map_hwdom_extra_mappings(d);
 if ( rc < 0 )
-- 
2.25.1




[PATCH v5 7/8] xen/arm: create shared memory nodes in guest device tree

2022-06-19 Thread Penny Zheng
We expose the shared memory to the domU using the "xen,shared-memory-v1"
reserved-memory binding. See
Documentation/devicetree/bindings/reserved-memory/xen,shared-memory.txt
in Linux for the corresponding device tree binding.

To save the cost of re-parsing the shared memory device tree configuration when
creating shared memory nodes in the guest device tree, this commit adds a new
field "shm_mem" to store the shm info per domain.

For each shared memory region, a range is exposed under
the /reserved-memory node as a child node. Each range sub-node is
named xen-shmem@ and has the following properties:
- compatible:
compatible = "xen,shared-memory-v1"
- reg:
the base guest physical address and size of the shared memory region
- xen,id:
a string that identifies the shared memory region.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 change:
- no change
---
v3 change:
- move field "shm_mem" to kernel_info
---
v2 change:
- using xzalloc
- shm_id should be uint8_t
- make reg a local variable
- add #address-cells and #size-cells properties
- fix alignment
---
 xen/arch/arm/domain_build.c   | 143 +-
 xen/arch/arm/include/asm/kernel.h |   1 +
 xen/arch/arm/include/asm/setup.h  |   1 +
 3 files changed, 143 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 1584e6c2ce..4d62440a0e 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -900,7 +900,22 @@ static int __init allocate_shared_memory(struct domain *d,
 return ret;
 }
 
-static int __init process_shm(struct domain *d,
+static int __init append_shm_bank_to_domain(struct kernel_info *kinfo,
+paddr_t start, paddr_t size,
+u32 shm_id)
+{
+if ( (kinfo->shm_mem.nr_banks + 1) > NR_MEM_BANKS )
+return -ENOMEM;
+
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].start = start;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].size = size;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].shm_id = shm_id;
+kinfo->shm_mem.nr_banks++;
+
+return 0;
+}
+
+static int __init process_shm(struct domain *d, struct kernel_info *kinfo,
   const struct dt_device_node *node)
 {
 struct dt_device_node *shm_node;
@@ -971,6 +986,14 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+/*
+ * Record static shared memory region info for later setting
+ * up shm-node in guest device tree.
+ */
+ret = append_shm_bank_to_domain(kinfo, gbase, psize, shm_id);
+if ( ret )
+return ret;
 }
 
 return 0;
@@ -1301,6 +1324,117 @@ static int __init make_memory_node(const struct domain *d,
 return res;
 }
 
+#ifdef CONFIG_STATIC_SHM
+static int __init make_shm_memory_node(const struct domain *d,
+   void *fdt,
+   int addrcells, int sizecells,
+   struct meminfo *mem)
+{
+unsigned long i = 0;
+int res = 0;
+
+if ( mem->nr_banks == 0 )
+return -ENOENT;
+
+/*
+ * For each shared memory region, a range is exposed under
+ * the /reserved-memory node as a child node. Each range sub-node is
+ * named xen-shmem@.
+ */
+dt_dprintk("Create xen-shmem node\n");
+
+for ( ; i < mem->nr_banks; i++ )
+{
+uint64_t start = mem->bank[i].start;
+uint64_t size = mem->bank[i].size;
+uint8_t shm_id = mem->bank[i].shm_id;
+/* Placeholder for xen-shmem@ + a 64-bit number + \0 */
+char buf[27];
+const char compat[] = "xen,shared-memory-v1";
+__be32 reg[4];
+__be32 *cells;
+unsigned int len = (addrcells + sizecells) * sizeof(__be32);
+
+snprintf(buf, sizeof(buf), "xen-shmem@%"PRIx64, mem->bank[i].start);
+res = fdt_begin_node(fdt, buf);
+if ( res )
+return res;
+
+res = fdt_property(fdt, "compatible", compat, sizeof(compat));
+if ( res )
+return res;
+
+cells = reg;
+dt_child_set_range(&cells, addrcells, sizecells, start, size);
+
+res = fdt_property(fdt, "reg", reg, len);
+if ( res )
+return res;
+
+dt_dprintk("Shared memory bank %lu: %#"PRIx64"->%#"PRIx64"\n",
+   i, start, start + size);
+
+res = fdt_property_cell(fdt, "xen,id", shm_id);
+if ( res )
+return res;
+
+res = fdt_end_node(fdt);
+if ( res )
+return res;
+}
+
+return res;
+}
+#els

[PATCH v5 6/8] xen/arm: set up shared memory foreign mapping for borrower domain

2022-06-19 Thread Penny Zheng
This commit sets up shared memory foreign mapping for borrower domain.

If the owner domain is the default dom_io, all sharing domains are treated as
borrower domains.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 changes:
- no change
---
v3 change:
- use map_regions_p2mt instead
---
v2 change:
- remove guest_physmap_add_shm, since for borrower domain, we only
do P2M foreign memory mapping now.
---
 xen/arch/arm/domain_build.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 650d18f5ef..1584e6c2ce 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -962,6 +962,15 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+if ( owner_dom_io || (strcmp(role_str, "borrower") == 0) )
+{
+/* Set up P2M foreign mapping for borrower domain. */
+ret = map_regions_p2mt(d, _gfn(PFN_UP(gbase)), PFN_DOWN(psize),
+   _mfn(PFN_UP(pbase)), p2m_map_foreign_rw);
+if ( ret )
+return ret;
+}
 }
 
 return 0;
-- 
2.25.1




[PATCH v5 5/8] xen/arm: Add additional reference to owner domain when the owner is allocated

2022-06-19 Thread Penny Zheng
A borrower domain will fail to get a page ref using the owner domain
during allocation, when the owner is created after the borrower.

So here, we decide to get and add the right number of references, which
is the number of borrowers, when the owner is allocated.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 changes:
- no change
---
v3 change:
- printk rather than dprintk since it is a serious error
---
v2 change:
- new commit
---
 xen/arch/arm/domain_build.c | 62 +
 1 file changed, 62 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index d4fd64e2bd..650d18f5ef 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -799,6 +799,34 @@ static mfn_t __init acquire_shared_memory_bank(struct domain *d,
 
 }
 
+static int __init acquire_nr_borrower_domain(struct domain *d,
+ paddr_t pbase, paddr_t psize,
+ unsigned long *nr_borrowers)
+{
+unsigned long bank;
+
+/* Iterate reserved memory to find requested shm bank. */
+for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
+{
+paddr_t bank_start = bootinfo.reserved_mem.bank[bank].start;
+paddr_t bank_size = bootinfo.reserved_mem.bank[bank].size;
+
+if ( pbase == bank_start && psize == bank_size )
+break;
+}
+
+if ( bank == bootinfo.reserved_mem.nr_banks )
+return -ENOENT;
+
+if ( d == dom_io )
+*nr_borrowers = bootinfo.reserved_mem.bank[bank].nr_shm_domain;
+else
+/* Exclude the owner domain itself. */
+*nr_borrowers = bootinfo.reserved_mem.bank[bank].nr_shm_domain - 1;
+
+return 0;
+}
+
 /*
  * Func allocate_shared_memory is supposed to be only called
  * from the owner.
@@ -810,6 +838,8 @@ static int __init allocate_shared_memory(struct domain *d,
 {
 mfn_t smfn;
 int ret = 0;
+unsigned long nr_pages, nr_borrowers, i;
+struct page_info *page;
 
 dprintk(XENLOG_INFO,
 "Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
@@ -824,6 +854,7 @@ static int __init allocate_shared_memory(struct domain *d,
  * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
  * It sees RAM 1:1 and we do not need to create P2M mapping for it
  */
+nr_pages = PFN_DOWN(psize);
 if ( d != dom_io )
 {
 ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn, PFN_DOWN(psize));
@@ -835,6 +866,37 @@ static int __init allocate_shared_memory(struct domain *d,
 }
 }
 
+/*
+ * Get the right amount of references per page, which is the number of
+ * borrow domains.
+ */
+ret = acquire_nr_borrower_domain(d, pbase, psize, &nr_borrowers);
+if ( ret )
+return ret;
+
+/*
+ * Instead of let borrower domain get a page ref, we add as many
+ * additional reference as the number of borrowers when the owner
+ * is allocated, since there is a chance that owner is created
+ * after borrower.
+ */
+page = mfn_to_page(smfn);
+for ( i = 0; i < nr_pages; i++ )
+{
+if ( !get_page_nr(page + i, d, nr_borrowers) )
+{
+printk(XENLOG_ERR
+   "Failed to add %lu references to page %"PRI_mfn".\n",
+   nr_borrowers, mfn_x(smfn) + i);
+goto fail;
+}
+}
+
+return 0;
+
+ fail:
+while ( --i >= 0 )
+put_page_nr(page + i, nr_borrowers);
 return ret;
 }
 
-- 
2.25.1




[PATCH v5 4/8] xen/arm: introduce put_page_nr and get_page_nr

2022-06-19 Thread Penny Zheng
Later, we need to add the right number of references, which should be
the number of borrower domains, to the owner domain. Since we only have
get_page() to increment the page reference by 1, a per-page loop is
needed, which is inefficient and time-consuming.

To save the loop time, this commit introduces a set of new helpers
put_page_nr() and get_page_nr() to increment/drop the page reference by nr.
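
As a minimal illustration of the intended usage (a sketch only, mirroring how
a later patch in this series uses the helpers):

"
/* Take nr_borrowers references in one atomic update instead of a loop. */
if ( !get_page_nr(page, d, nr_borrowers) )
    return -EINVAL;  /* owner mismatch, or the refcount would overflow */

/* ... the borrower domains map and use the page ... */

put_page_nr(page, nr_borrowers);
"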

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 changes:
- fix the assert about checking overflow to make sure that the right equation
return is at least equal to nr
- simplify the assert about checking the underflow
---
v3 changes:
- check overflow with "n"
- remove spurious change
- bring back the check that we enter the loop only when count_info is
greater than 0
---
v2 change:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  4 
 xen/arch/arm/mm.c | 42 +++
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 045a8ba4bb..8384eac2c8 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -343,6 +343,10 @@ void free_init_memory(void);
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
   unsigned int order);
 
+extern bool get_page_nr(struct page_info *page, const struct domain *domain,
+unsigned long nr);
+extern void put_page_nr(struct page_info *page, unsigned long nr);
+
 extern void put_page_type(struct page_info *page);
 static inline void put_page_and_type(struct page_info *page)
 {
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index be37176a47..79b7d8de56 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1587,21 +1587,29 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 return 0;
 }
 
-struct domain *page_get_owner_and_reference(struct page_info *page)
+static struct domain *page_get_owner_and_nr_reference(struct page_info *page,
+  unsigned long nr)
 {
 unsigned long x, y = page->count_info;
 struct domain *owner;
 
+/* Restrict nr to avoid "double" overflow */
+if ( nr >= PGC_count_mask )
+{
+ASSERT_UNREACHABLE();
+return NULL;
+}
+
 do {
 x = y;
 /*
  * Count ==  0: Page is not allocated, so we cannot take a reference.
  * Count == -1: Reference count would wrap, which is invalid.
  */
-if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
+if ( unlikely(((x + nr) & PGC_count_mask) <= nr) )
 return NULL;
 }
-while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
+while ( (y = cmpxchg(&page->count_info, x, x + nr)) != x );
 
 owner = page_get_owner(page);
 ASSERT(owner);
@@ -1609,14 +1617,19 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
 return owner;
 }
 
-void put_page(struct page_info *page)
+struct domain *page_get_owner_and_reference(struct page_info *page)
+{
+return page_get_owner_and_nr_reference(page, 1);
+}
+
+void put_page_nr(struct page_info *page, unsigned long nr)
 {
 unsigned long nx, x, y = page->count_info;
 
 do {
-ASSERT((y & PGC_count_mask) != 0);
+ASSERT((y & PGC_count_mask) >= nr);
 x  = y;
-nx = x - 1;
+nx = x - nr;
 }
 while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
@@ -1626,19 +1639,30 @@ void put_page(struct page_info *page)
 }
 }
 
-bool get_page(struct page_info *page, const struct domain *domain)
+void put_page(struct page_info *page)
+{
+put_page_nr(page, 1);
+}
+
+bool get_page_nr(struct page_info *page, const struct domain *domain,
+ unsigned long nr)
 {
-const struct domain *owner = page_get_owner_and_reference(page);
+const struct domain *owner = page_get_owner_and_nr_reference(page, nr);
 
 if ( likely(owner == domain) )
 return true;
 
 if ( owner != NULL )
-put_page(page);
+put_page_nr(page, nr);
 
 return false;
 }
 
+bool get_page(struct page_info *page, const struct domain *domain)
+{
+return get_page_nr(page, domain, 1);
+}
+
 /* Common code requires get_page_type and put_page_type.
  * We don't care about typecounts so we just do the minimum to make it
  * happy. */
-- 
2.25.1




[PATCH v5 3/8] xen/arm: allocate static shared memory to a specific owner domain

2022-06-19 Thread Penny Zheng
If the owner property is defined, the owner domain of a static shared memory
region is no longer the default dom_io, but a specific domain.

This commit implements allocating static shared memory to a specific domain
when the owner property is defined.

The code path for dealing with borrower domains will be introduced in the
following commits.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 change:
- no changes
---
v3 change:
- simplify the code since o_gbase is not used if the domain is dom_io
---
v2 change:
- P2M mapping is restricted to normal domain
- in-code comment fix
---
 xen/arch/arm/domain_build.c | 44 +++--
 1 file changed, 33 insertions(+), 11 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 91a5ace851..d4fd64e2bd 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -805,9 +805,11 @@ static mfn_t __init acquire_shared_memory_bank(struct domain *d,
  */
 static int __init allocate_shared_memory(struct domain *d,
  u32 addr_cells, u32 size_cells,
- paddr_t pbase, paddr_t psize)
+ paddr_t pbase, paddr_t psize,
+ paddr_t gbase)
 {
 mfn_t smfn;
+int ret = 0;
 
 dprintk(XENLOG_INFO,
 "Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
@@ -822,8 +824,18 @@ static int __init allocate_shared_memory(struct domain *d,
  * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
  * It sees RAM 1:1 and we do not need to create P2M mapping for it
  */
-ASSERT(d == dom_io);
-return 0;
+if ( d != dom_io )
+{
+ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn, PFN_DOWN(psize));
+if ( ret )
+{
+printk(XENLOG_ERR
+   "Failed to map shared memory to %pd.\n", d);
+return ret;
+}
+}
+
+return ret;
 }
 
 static int __init process_shm(struct domain *d,
@@ -836,6 +848,8 @@ static int __init process_shm(struct domain *d,
 u32 shm_id;
 u32 addr_cells, size_cells;
 paddr_t gbase, pbase, psize;
+const char *role_str;
+bool owner_dom_io = true;
 
 dt_for_each_child_node(node, shm_node)
 {
@@ -862,19 +876,27 @@ static int __init process_shm(struct domain *d,
 ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize, PAGE_SIZE));
 gbase = dt_read_number(cells, addr_cells);
 
-/* TODO: Consider owner domain is not the default dom_io. */
+/*
+ * "role" property is optional and if it is defined explicitly,
+ * then the owner domain is not the default "dom_io" domain.
+ */
+if ( dt_property_read_string(shm_node, "role", &role_str) == 0 )
+owner_dom_io = false;
+
 /*
  * Per static shared memory region could be shared between multiple
  * domains.
- * In case re-allocating the same shared memory region, we check
- * if it is already allocated to the default owner dom_io before
- * the actual allocation.
+ * So when owner domain is the default dom_io, in case re-allocating
+ * the same shared memory region, we check if it is already allocated
+ * to the default owner dom_io before the actual allocation.
  */
-if ( !is_shm_allocated_to_domio(pbase) )
+if ( (owner_dom_io && !is_shm_allocated_to_domio(pbase)) ||
+ (!owner_dom_io && strcmp(role_str, "owner") == 0) )
 {
-/* Allocate statically shared pages to the default owner dom_io. */
-ret = allocate_shared_memory(dom_io, addr_cells, size_cells,
- pbase, psize);
+/* Allocate statically shared pages to the owner domain. */
+ret = allocate_shared_memory(owner_dom_io ? dom_io : d,
+ addr_cells, size_cells,
+ pbase, psize, gbase);
 if ( ret )
 return ret;
 }
-- 
2.25.1




[PATCH v5 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-06-19 Thread Penny Zheng
From: Penny Zheng 

This commit introduces process_shm to cope with static shared memory in
domain construction.

DOMID_IO will be the default owner of memory pre-shared among multiple domains
at boot time, when no explicit owner is specified.

This commit only considers allocating static shared memory to dom_io
when the owner domain is not explicitly defined in the device tree. All the
rest, including the "borrower" code path and the "explicit owner" code path,
will be introduced in the following patches.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- refine in-code comment
---
v4 change:
- no changes
---
v3 change:
- refine in-code comment
---
v2 change:
- instead of introducing a new system domain, reuse the existing dom_io
- make dom_io a non-auto-translated domain, then no need to create P2M
for it
- change dom_io definition and make it wider to support static shm here too
- introduce is_shm_allocated_to_domio to check whether static shm is
allocated yet, instead of using shm_mask bitmap
- add in-code comment
---
 xen/arch/arm/domain_build.c | 132 +++-
 xen/common/domain.c |   3 +
 2 files changed, 134 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 7ddd16c26d..91a5ace851 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -527,6 +527,10 @@ static bool __init append_static_memory_to_bank(struct domain *d,
 return true;
 }
 
+/*
+ * If cell is NULL, pbase and psize should hold valid values.
+ * Otherwise, cell will be populated together with pbase and psize.
+ */
 static mfn_t __init acquire_static_memory_bank(struct domain *d,
const __be32 **cell,
u32 addr_cells, u32 size_cells,
@@ -535,7 +539,8 @@ static mfn_t __init acquire_static_memory_bank(struct domain *d,
 mfn_t smfn;
 int res;
 
-device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
+if ( cell )
+device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
 ASSERT(IS_ALIGNED(*pbase, PAGE_SIZE) && IS_ALIGNED(*psize, PAGE_SIZE));
 if ( PFN_DOWN(*psize) > UINT_MAX )
 {
@@ -759,6 +764,125 @@ static void __init assign_static_memory_11(struct domain *d,
 panic("Failed to assign requested static memory for direct-map domain %pd.",
   d);
 }
+
+#ifdef CONFIG_STATIC_SHM
+/*
+ * This function checks whether the static shared memory region is
+ * already allocated to dom_io.
+ */
+static bool __init is_shm_allocated_to_domio(paddr_t pbase)
+{
+struct page_info *page;
+
+page = maddr_to_page(pbase);
+ASSERT(page);
+
+if ( page_get_owner(page) == NULL )
+return false;
+
+ASSERT(page_get_owner(page) == dom_io);
+return true;
+}
+
+static mfn_t __init acquire_shared_memory_bank(struct domain *d,
+   u32 addr_cells, u32 size_cells,
+   paddr_t *pbase, paddr_t *psize)
+{
+/*
+ * Pages of statically shared memory shall be included
+ * in domain_tot_pages().
+ */
+d->max_pages += PFN_DOWN(*psize);
+
+return acquire_static_memory_bank(d, NULL, addr_cells, size_cells,
+  pbase, psize);
+
+}
+
+/*
+ * Func allocate_shared_memory is supposed to be only called
+ * from the owner.
+ */
+static int __init allocate_shared_memory(struct domain *d,
+ u32 addr_cells, u32 size_cells,
+ paddr_t pbase, paddr_t psize)
+{
+mfn_t smfn;
+
+dprintk(XENLOG_INFO,
+"Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
+pbase, pbase + psize);
+
+smfn = acquire_shared_memory_bank(d, addr_cells, size_cells, &pbase,
+  &psize);
+if ( mfn_eq(smfn, INVALID_MFN) )
+return -EINVAL;
+
+/*
+ * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
+ * It sees RAM 1:1 and we do not need to create P2M mapping for it
+ */
+ASSERT(d == dom_io);
+return 0;
+}
+
+static int __init process_shm(struct domain *d,
+  const struct dt_device_node *node)
+{
+struct dt_device_node *shm_node;
+int ret = 0;
+const struct dt_property *prop;
+const __be32 *cells;
+u32 shm_id;
+u32 addr_cells, size_cells;
+paddr_t gbase, pbase, psize;
+
+dt_for_each_child_node(node, shm_node)
+{
+if ( !dt_device_is_compatible(shm_node, "xen,domain-shared-memory-v1") )
+continue;
+
+if ( !dt_property_read_u32(shm_node, "xen,shm-id", &shm_id) )
+{
+printk("Shared memory node does not provide \"x

[PATCH v5 0/8] static shared memory on dom0less system

2022-06-19 Thread Penny Zheng
In safety-critical environment, it is not considered safe to
dynamically change important configurations at runtime. Everything
should be statically defined and statically verified.

In this case, if the system configuration knows a priori that there are
only 2 VMs and they need to communicate over shared memory, it is safer
to pre-configure the shared memory at build time rather than let the VMs
attempt to share memory at runtime. And it is faster too.

Furthermore, on a dom0less system, the legacy ways to build up communication
channels between domains, like the grant table, are normally absent.

So this patch series introduces a set of static shared memory device tree nodes
to allow users to statically set up shared memory on a dom0less system, enabling
domains to do shm-based communication.

The only way to trigger this static shared memory configuration should
be via device tree, which is at the same level as the XSM rules.

It was inspired by the ["xl/libxl-based shared memory"](
https://marc.info/?l=xen-devel&m=154404821731186) patch series.

See the related [design link](
https://lore.kernel.org/all/a50d9fde-1d06-7cda-2779-9eea9e1c0...@xen.org/T/)
for more details.

Penny Zheng (8):
  xen/arm: introduce static shared memory
  xen/arm: allocate static shared memory to the default owner dom_io
  xen/arm: allocate static shared memory to a specific owner domain
  xen/arm: introduce put_page_nr and get_page_nr
  xen/arm: Add additional reference to owner domain when the owner is
allocated
  xen/arm: set up shared memory foreign mapping for borrower domain
  xen/arm: create shared memory nodes in guest device tree
  xen/arm: enable statically shared memory on Dom0

 docs/misc/arm/device-tree/booting.txt | 120 
 xen/arch/arm/Kconfig  |   6 +
 xen/arch/arm/bootfdt.c|  68 +
 xen/arch/arm/domain_build.c   | 378 +-
 xen/arch/arm/include/asm/kernel.h |   1 +
 xen/arch/arm/include/asm/mm.h |   4 +
 xen/arch/arm/include/asm/setup.h  |   4 +
 xen/arch/arm/mm.c |  42 ++-
 xen/common/domain.c   |   3 +
 9 files changed, 616 insertions(+), 10 deletions(-)

-- 
2.25.1




[PATCH v5 1/8] xen/arm: introduce static shared memory

2022-06-19 Thread Penny Zheng
From: Penny Zheng 

This patch series introduces a new feature: setting up static
shared memory on a dom0less system, through device tree configuration.

This commit parses the shared memory nodes at boot time, and reserves them in
bootinfo.reserved_mem to avoid other uses.

This commit proposes a new Kconfig option CONFIG_STATIC_SHM to wrap the
static-shm-related code; this option depends on static memory
(CONFIG_STATIC_MEMORY). That's because later we want to reuse a few helpers
guarded by CONFIG_STATIC_MEMORY, like acquire_staticmem_pages, etc., on
static shared memory.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v5 change:
- no change
---
v4 change:
- nit fix on doc
---
v3 change:
- make nr_shm_domain unsigned int
---
v2 change:
- document refinement
- remove bitmap and use the iteration to check
- add a new field nr_shm_domain to keep the number of shared domain
---
 docs/misc/arm/device-tree/booting.txt | 120 ++
 xen/arch/arm/Kconfig  |   6 ++
 xen/arch/arm/bootfdt.c|  68 +++
 xen/arch/arm/include/asm/setup.h  |   3 +
 4 files changed, 197 insertions(+)

diff --git a/docs/misc/arm/device-tree/booting.txt b/docs/misc/arm/device-tree/booting.txt
index 98253414b8..6467bc5a28 100644
--- a/docs/misc/arm/device-tree/booting.txt
+++ b/docs/misc/arm/device-tree/booting.txt
@@ -378,3 +378,123 @@ device-tree:
 
 This will reserve a 512MB region starting at the host physical address
 0x3000 to be exclusively used by DomU1.
+
+Static Shared Memory
+====================
+
+The static shared memory device tree nodes allow users to statically set up
+shared memory on a dom0less system, enabling domains to do shm-based
+communication.
+
+- compatible
+
+"xen,domain-shared-memory-v1"
+
+- xen,shm-id
+
+An 8-bit integer that represents the unique identifier of the shared memory
+region. The maximum identifier shall be "xen,shm-id = <0xff>".
+
+- xen,shared-mem
+
+An array takes a physical address, which is the base address of the
+shared memory region in host physical address space, a size, and a guest
+physical address, as the target address of the mapping. The number of cells
+for the host address (and size) is the same as the guest pseudo-physical
+address and they are inherited from the parent node.
+
+- role (Optional)
+
+A string property specifying the ownership of a shared memory region;
+the value must be one of the following: "owner" or "borrower".
+A shared memory region could be explicitly backed by one domain, which is
+called the "owner domain", and all the other domains that are also sharing
+this region are called "borrower domains".
+If not specified, the default value is "borrower" and the owner is
+"dom_shared", a system domain.
+
+As an example:
+
+chosen {
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+xen,xen-bootargs = "console=dtuart dtuart=serial0 bootscrub=0";
+
+..
+
+/* this is for Dom0 */
+dom0-shared-mem@1000 {
+compatible = "xen,domain-shared-memory-v1";
+role = "owner";
+xen,shm-id = <0x0>;
+xen,shared-mem = <0x1000 0x1000 0x1000>;
+}
+
+domU1 {
+compatible = "xen,domain";
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+memory = <0 131072>;
+cpus = <2>;
+vpl011;
+
+/*
+ * shared memory region identified as 0x0(xen,shm-id = <0x0>)
+ * is shared between Dom0 and DomU1.
+ */
+domU1-shared-mem@1000 {
+compatible = "xen,domain-shared-memory-v1";
+role = "borrower";
+xen,shm-id = <0x0>;
+xen,shared-mem = <0x1000 0x1000 0x5000>;
+}
+
+/*
+ * shared memory region identified as 0x1(xen,shm-id = <0x1>)
+ * is shared between DomU1 and DomU2.
+ */
+domU1-shared-mem@5000 {
+compatible = "xen,domain-shared-memory-v1";
+xen,shm-id = <0x1>;
+xen,shared-mem = <0x5000 0x2000 0x6000>;
+}
+
+..
+
+};
+
+domU2 {
+compatible = "xen,domain";
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+memory = <0 65536>;
+cpus = <1>;
+
+/*
+ * shared memory region identified as 0x1(xen,shm-id = <0x1>)
+ * is shared between domU1 and domU2.
+ */
+domU2-shared-mem@5000 {
+compatible = "xen,domain-shared-memory-v1";
+xen,shm-id = <0x1>;
+xen,shared-mem = <0x5000 0x2000 0x7000>;
+}
+
+

[PATCH v7 6/9] xen/arm: introduce CDF_staticmem

2022-06-19 Thread Penny Zheng
In order to have an easy and quick way to find out whether a domain's memory
is statically configured, this commit introduces a new flag CDF_staticmem and
a new helper is_domain_using_staticmem() to tell.

Signed-off-by: Penny Zheng 
---
v7 changes:
- IS_ENABLED(CONFIG_STATIC_MEMORY) would not be needed anymore
---
v6 changes:
- move non-zero is_domain_using_staticmem() from ARM header to common
header
---
v5 changes:
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v4 changes:
- no changes
---
v3 changes:
- change name from "is_domain_static()" to "is_domain_using_staticmem"
---
v2 changes:
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
---
 xen/arch/arm/domain_build.c |  5 -
 xen/include/xen/domain.h| 12 
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 7ddd16c26d..17cd886be8 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -3287,9 +3287,12 @@ void __init create_domUs(void)
 if ( !dt_device_is_compatible(node, "xen,domain") )
 continue;
 
+if ( dt_find_property(node, "xen,static-mem", NULL) )
+flags |= CDF_staticmem;
+
 if ( dt_property_read_bool(node, "direct-map") )
 {
-if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !dt_find_property(node, "xen,static-mem", NULL) )
+if ( !(flags & CDF_staticmem) )
 panic("direct-map is not valid for domain %s without static allocation.\n",
   dt_node_name(node));
 
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 1c3c88a14d..cf34eddf6d 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -35,6 +35,18 @@ void arch_get_domain_info(const struct domain *d,
 /* Should domain memory be directly mapped? */
 #define CDF_directmap            (1U << 1)
 #endif
+/* Is domain memory on static allocation? */
+#ifdef CONFIG_STATIC_MEMORY
+#define CDF_staticmem            (1U << 2)
+#else
+#define CDF_staticmem            0
+#endif
+
+#ifdef CONFIG_STATIC_MEMORY
+#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)
+#else
+#define is_domain_using_staticmem(d) ((void)(d), false)
+#endif
 
 /*
  * Arch-specifics.
-- 
2.25.1




[PATCH v7 9/9] xen: retrieve reserved pages on populate_physmap

2022-06-19 Thread Penny Zheng
When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list to make sure that
guest RAM is still restricted to statically configured memory regions.
This commit also introduces a new helper acquire_reserved_page to make it work.

Signed-off-by: Penny Zheng 
---
v7 changes:
- remove the lock, since we add the page to rsv_page_list after it has
been totally freed.
---
v6 changes:
- drop the lock before returning
---
v5 changes:
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c | 23 ++
 xen/common/page_alloc.c | 68 ++---
 xen/include/xen/mm.h|  1 +
 3 files changed, 75 insertions(+), 17 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
 
 mfn = _mfn(gpfn);
 }
+else if ( is_domain_using_staticmem(d) )
+{
+/*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+if ( a->extent_order != 0 )
+{
+gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for static 
%pd\n",
+ a->extent_order, d);
+goto out;
+}
+
+mfn = acquire_reserved_page(d, a->memflags);
+if ( mfn_eq(mfn, INVALID_MFN) )
+{
+gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+goto out;
+}
+}
 else
 {
 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index fee396a92d..74628889ea 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2673,9 +2673,8 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 spin_unlock(&heap_lock);
 }
 
-static bool __init prepare_staticmem_pages(struct page_info *pg,
-   unsigned long nr_mfns,
-   unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 return pg;
 }
 
-/*
- * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
- * then assign them to one specific domain #d.
- */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
-   unsigned int nr_mfns, unsigned int memflags)
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+  unsigned int nr_mfns, unsigned int memflags)
 {
-struct page_info *pg;
-
-ASSERT_ALLOC_CONTEXT();
-
-pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
-if ( !pg )
-return -ENOENT;
-
 if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
 {
 /*
@@ -2789,6 +2776,53 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 
 return 0;
 }
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+   unsigned int nr_mfns, unsigned int memflags)
+{
+struct page_info *pg;
+
+ASSERT_ALLOC_CONTEXT();
+
+pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+if ( !pg )
+return -ENOENT;
+
+if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
+return -EI

[PATCH v7 8/9] xen: introduce prepare_staticmem_pages

2022-06-19 Thread Penny Zheng
Later, we want to use acquire_domstatic_pages() to populate memory
for a static domain at runtime; however, it does a lot of pointless work
(checking mfn_valid(), scrubbing the free part, cleaning the cache...)
considering we know the page is valid and belongs to the guest.

This commit splits acquire_staticmem_pages() in two parts, and
introduces prepare_staticmem_pages to bypass all "pointless work".

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 -
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7d223087c0..fee396a92d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2673,26 +2673,13 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 spin_unlock(&heap_lock);
 }
 
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+   unsigned long nr_mfns,
+   unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
 unsigned long i;
-struct page_info *pg;
-
-ASSERT(nr_mfns);
-for ( i = 0; i < nr_mfns; i++ )
-if ( !mfn_valid(mfn_add(smfn, i)) )
-return NULL;
-
-pg = mfn_to_page(smfn);
 
 spin_lock(&heap_lock);
 
@@ -2703,7 +2690,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
-   i, mfn_x(smfn) + i,
+   i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
 goto out_err;
 }
@@ -2727,6 +2714,38 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 if ( need_tlbflush )
 filtered_flush_tlb_mask(tlbflush_timestamp);
 
+return true;
+
+ out_err:
+while ( i-- )
+pg[i].count_info = PGC_static | PGC_state_free;
+
+spin_unlock(&heap_lock);
+
+return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+unsigned long i;
+struct page_info *pg;
+
+ASSERT(nr_mfns);
+for ( i = 0; i < nr_mfns; i++ )
+if ( !mfn_valid(mfn_add(smfn, i)) )
+return NULL;
+
+pg = mfn_to_page(smfn);
+if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+return NULL;
+
 /*
  * Ensure cache and RAM are consistent for platforms where the guest
  * can control its own visibility of/through the cache.
@@ -2735,14 +2754,6 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
 
 return pg;
-
- out_err:
-while ( i-- )
-pg[i].count_info = PGC_static | PGC_state_free;
-
-spin_unlock(&heap_lock);
-
-return NULL;
 }
 
 /*
-- 
2.25.1




[PATCH v7 7/9] xen/arm: unpopulate memory when domain is static

2022-06-19 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, it will always
hand the memory back to the heap allocator. And that is a problem if the
domain is static.

Pages used as guest RAM for a static domain shall be reserved to this domain
only and not be used for any other purposes, so they shall never go back to
the heap allocator.

This commit puts reserved pages on the new list resv_page_list only after
having taken them off the "normal" list, when the last ref is dropped.

Signed-off-by: Penny Zheng 
---
v7 changes:
- Add page on the rsv_page_list *after* it has been freed
---
v6 changes:
- refine in-code comment
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
---
v5 changes:
- adapt this patch for PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
---
v2 changes:
- put reserved pages on resv_page_list after having taken them off
the "normal" list
---
 xen/common/domain.c | 4 
 xen/common/page_alloc.c | 4 
 xen/include/xen/mm.h| 9 +
 xen/include/xen/sched.h | 3 +++
 4 files changed, 20 insertions(+)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index a3ef991bd1..a49574fa24 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -604,6 +604,10 @@ struct domain *domain_create(domid_t domid,
 INIT_PAGE_LIST_HEAD(&d->page_list);
 INIT_PAGE_LIST_HEAD(&d->extra_page_list);
 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
+#ifdef CONFIG_STATIC_MEMORY
+INIT_PAGE_LIST_HEAD(&d->resv_page_list);
+#endif
+
 
 spin_lock_init(&d->node_affinity_lock);
 d->node_affinity = NODE_MASK_ALL;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index d9253df270..7d223087c0 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2498,6 +2498,10 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
 }
 
 free_heap_pages(pg, order, scrub);
+
+/* Add page on the resv_page_list *after* it has been freed. */
+if ( unlikely(pg->count_info & PGC_static) )
+put_static_pages(d, pg, order);
 }
 
 if ( drop_dom_ref )
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 1c4ddb336b..68a647ceae 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -90,6 +90,15 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
   bool need_scrub);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
 unsigned int memflags);
+#ifdef CONFIG_STATIC_MEMORY
+#define put_static_pages(d, page, order) ({ \
+unsigned int i; \
+for ( i = 0; i < (1 << (order)); i++ )  \
+page_list_add_tail((pg) + i, &(d)->resv_page_list); \
+})
+#else
+#define put_static_pages(d, page, order) ((void)(d), (void)(page), (void)(order))
+#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5191853c18..bd2782b3c5 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -381,6 +381,9 @@ struct domain
 struct page_list_head page_list;  /* linked list */
 struct page_list_head extra_page_list; /* linked list (size extra_pages) */
 struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
+#ifdef CONFIG_STATIC_MEMORY
+struct page_list_head resv_page_list; /* linked list */
+#endif
 
 /*
  * This field should only be directly accessed by domain_adjust_tot_pages()
-- 
2.25.1




[PATCH v7 5/9] xen: add field "flags" to cover all internal CDF_XXX

2022-06-19 Thread Penny Zheng
With more and more CDF_xxx internal flags in and to save the space, this
commit introduces a new field "flags" in struct domain to store CDF_*
internal flags directly.

Another new CDF_xxx will be introduced in the next patch.

Signed-off-by: Penny Zheng 
Acked-by: Julien Grall 
---
v7 changes:
- no change
---
v6 changes:
- no change
---
v5 changes:
- no change
---
v4 changes:
- no change
---
v3 changes:
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- fix coding style
---
 xen/arch/arm/domain.c | 2 --
 xen/arch/arm/include/asm/domain.h | 3 +--
 xen/common/domain.c   | 3 +++
 xen/include/xen/sched.h   | 3 +++
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 8110c1df86..74189d9878 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -709,8 +709,6 @@ int arch_domain_create(struct domain *d,
 ioreq_domain_init(d);
 #endif
 
-d->arch.directmap = flags & CDF_directmap;
-
 /* p2m_init relies on some value initialized by the IOMMU subsystem */
 if ( (rc = iommu_domain_init(d, config->iommu_opts)) != 0 )
 goto fail;
diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
index ed63c2b6f9..fe7a029ebf 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -29,7 +29,7 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
-#define is_domain_direct_mapped(d) (d)->arch.directmap
+#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
 /*
  * Is the domain using the host memory layout?
@@ -103,7 +103,6 @@ struct arch_domain
 void *tee;
 #endif
 
-bool directmap;
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 7570eae91a..a3ef991bd1 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -567,6 +567,9 @@ struct domain *domain_create(domid_t domid,
 /* Sort out our idea of is_system_domain(). */
 d->domain_id = domid;
 
+/* Holding CDF_* internal flags. */
+d->cdf = flags;
+
 /* Debug sanity. */
 ASSERT(is_system_domain(d) ? config == NULL : config != NULL);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 463d41ffb6..5191853c18 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -596,6 +596,9 @@ struct domain
 struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
 } ioreq_server;
 #endif
+
+/* Holding CDF_* constant. Internal flags for domain creation. */
+unsigned int cdf;
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.25.1




[PATCH v7 4/9] xen: do not merge reserved pages in free_heap_pages()

2022-06-19 Thread Penny Zheng
The code in free_heap_pages() will try to merge pages with the
successor/predecessor if pages are suitably aligned. So if the reserved
pages are right next to the pages given to the heap allocator,
free_heap_pages() will merge them, and accidentally give the reserved pages
to the heap allocator as a result.

So in order to avoid the above scenario, this commit updates free_heap_pages()
to check whether the predecessor and/or successor has PGC_reserved set,
when trying to merge the about-to-be-freed chunk with the predecessor
and/or successor.

Suggested-by: Julien Grall 
Signed-off-by: Penny Zheng 
Reviewed-by: Jan Beulich 
Reviewed-by: Julien Grall 
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- change PGC_reserved to adapt to PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- no changes
---
v2 changes:
- new commit
---
 xen/common/page_alloc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index f27fa90ec4..d9253df270 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1486,6 +1486,7 @@ static void free_heap_pages(
 /* Merge with predecessor block? */
 if ( !mfn_valid(page_to_mfn(predecessor)) ||
  !page_state_is(predecessor, free) ||
+ (predecessor->count_info & PGC_static) ||
  (PFN_ORDER(predecessor) != order) ||
  (phys_to_nid(page_to_maddr(predecessor)) != node) )
 break;
@@ -1509,6 +1510,7 @@ static void free_heap_pages(
 /* Merge with successor block? */
 if ( !mfn_valid(page_to_mfn(successor)) ||
  !page_state_is(successor, free) ||
+ (successor->count_info & PGC_static) ||
  (PFN_ORDER(successor) != order) ||
  (phys_to_nid(page_to_maddr(successor)) != node) )
 break;
-- 
2.25.1




[PATCH v7 0/9] populate/unpopulate memory when domain on static allocation

2022-06-19 Thread Penny Zheng
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Today, when a domain unpopulates memory at runtime, it will always
hand the memory over to the heap allocator. And that is a problem if it
is a static domain.
Pages used as guest RAM for a static domain shall always be reserved to this
domain only, and not be used for any other purposes, so they shall never go
back to the heap allocator.

This patch series intends to fix this issue by adding pages to the new list
resv_page_list after having taken them off the "normal" list when unpopulating
memory, and retrieving pages from the resv page list (resv_page_list) when
populating memory.

---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
- IS_ENABLED(CONFIG_STATIC_MEMORY) would not be needed anymore
- add page on the rsv_page_list *after* it has been freed
- remove the lock, since we add the page to rsv_page_list after it has
been totally freed.
---
v6 changes:
- rename PGC_staticmem to PGC_static
- remove #ifdef aroud function declaration
- use domain instead of sub-systems
- move non-zero is_domain_using_staticmem() from ARM header to common
header
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
- drop the lock before returning
---
v5 changes:
- introduce three new commits
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- commit message refinement
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
- change name from "is_domain_static()" to "is_domain_using_staticmem"
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
- move #ifndef is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
- put reserved pages on resv_page_list after having taken them off
the "normal" list
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
- fix coding style

Penny Zheng (9):
  xen/arm: rename PGC_reserved to PGC_static
  xen: do not free reserved memory into heap
  xen: update SUPPORT.md for static allocation
  xen: do not merge reserved pages in free_heap_pages()
  xen: add field "flags" to cover all internal CDF_XXX
  xen/arm: introduce CDF_staticmem
  xen/arm: unpopulate memory when domain is static
  xen: introduce prepare_staticmem_pages
  xen: retrieve reserved pages on populate_physmap

 SUPPORT.md|   7 ++
 xen/arch/arm/domain.c |   2 -
 xen/arch/arm/domain_build.c   |   5 +-
 xen/arch/arm/include/asm/domain.h |   3 +-
 xen/arch/arm/include/asm/mm.h |   8 +-
 xen/common/domain.c   |   7 ++
 xen/common/memory.c   |  23 +
 xen/common/page_alloc.c   | 155 +-
 xen/include/xen/domain.h  |  12 +++
 xen/include/xen/mm.h  |  10 +-
 xen/include/xen/sched.h   |   6 ++
 11 files changed, 182 insertions(+), 56 deletions(-)

-- 
2.25.1




[PATCH v7 1/9] xen/arm: rename PGC_reserved to PGC_static

2022-06-19 Thread Penny Zheng
PGC_reserved could be ambiguous, as it does not tell what the pages are
reserved for, so this commit renames PGC_reserved to
PGC_static, which clearly indicates that the page is reserved for static
memory.

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v7 changes:
- no change
---
v6 changes:
- rename PGC_staticmem to PGC_static
---
v5 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++---
 xen/common/page_alloc.c   | 22 +++---
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 045a8ba4bb..daef12e740 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,9 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is reserved */
-#define _PGC_reserved PG_shift(3)
-#define PGC_reserved  PG_mask(1, 3)
+  /* Page is static memory */
+#define _PGC_staticPG_shift(3)
+#define PGC_static PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 000ae6b972..743e3543fd 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,8 +151,8 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_reserved
-#define PGC_reserved 0
+#ifndef PGC_static
+#define PGC_static 0
 #endif
 
 /*
@@ -2286,7 +2286,7 @@ int assign_pages(
 
 for ( i = 0; i < nr; i++ )
 {
-ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
 if ( pg[i].count_info & PGC_extra )
 extra_pages++;
 }
@@ -2346,7 +2346,7 @@ int assign_pages(
 page_set_owner(&pg[i], d);
 smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
 pg[i].count_info =
-(pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 
1;
+(pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
 
 page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
 }
@@ -2652,8 +2652,8 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_reserved. */
-pg[i].count_info |= PGC_reserved;
+/* In case initializing page of static memory, mark it PGC_static. */
+pg[i].count_info |= PGC_static;
 }
 }
 
@@ -2682,8 +2682,8 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
 for ( i = 0; i < nr_mfns; i++ )
 {
-/* The page should be reserved and not yet allocated. */
-if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+/* The page should be static and not yet allocated. */
+if ( pg[i].count_info != (PGC_state_free | PGC_static) )
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
@@ -2697,10 +2697,10 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 &tlbflush_timestamp);
 
 /*
- * Preserve flag PGC_reserved and change page state
+ * Preserve flag PGC_static and change page state
  * to PGC_state_inuse.
  */
-pg[i].count_info = PGC_reserved | PGC_state_inuse;
+pg[i].count_info = PGC_static | PGC_state_inuse;
 /* Initialise fields which have other uses for free pages. */
 pg[i].u.inuse.type_info = 0;
 page_set_owner(&pg[i], NULL);
@@ -2722,7 +2722,7 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
  out_err:
 while ( i-- )
-pg[i].count_info = PGC_reserved | PGC_state_free;
+pg[i].count_info = PGC_static | PGC_state_free;
 
 spin_unlock(&heap_lock);
 
-- 
2.25.1




[PATCH v7 2/9] xen: do not free reserved memory into heap

2022-06-19 Thread Penny Zheng
Pages used as guest RAM for a static domain shall be reserved to this
domain only.
So in case reserved pages are used for another purpose, users
shall not free them back to the heap, even when the last ref gets dropped.

free_staticmem_pages will be called by free_heap_pages at runtime
when a static domain frees memory, so let's drop the __init
flag.

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v7 changes:
- protect free_staticmem_pages with heap_lock to match its reverse function
acquire_staticmem_pages
---
v6 changes:
- adapt to PGC_static
- remove #ifdef around function declaration
---
v5 changes:
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- fix the indentation
---
v2 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  4 +++-
 xen/common/page_alloc.c   | 16 +---
 xen/include/xen/mm.h  |  2 --
 3 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index daef12e740..066a869783 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,11 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
 #define _PGC_staticPG_shift(3)
 #define PGC_static PG_mask(1, 3)
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 743e3543fd..f27fa90ec4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1443,6 +1443,13 @@ static void free_heap_pages(
 
 ASSERT(order <= MAX_ORDER);
 
+if ( unlikely(pg->count_info & PGC_static) )
+{
+/* Pages of static memory shall not go back to the heap. */
+free_staticmem_pages(pg, 1UL << order, need_scrub);
+return;
+}
+
 spin_lock(&heap_lock);
 
 for ( i = 0; i < (1 << order); i++ )
@@ -2636,12 +2643,14 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+  bool need_scrub)
 {
 mfn_t mfn = page_to_mfn(pg);
 unsigned long i;
 
+spin_lock(&heap_lock);
+
 for ( i = 0; i < nr_mfns; i++ )
 {
 mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2652,9 +2661,10 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_static. */
 pg[i].count_info |= PGC_static;
 }
+
+spin_unlock(&heap_lock);
 }
 
 /*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 3be754da92..1c4ddb336b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,13 +85,11 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
-#ifdef CONFIG_STATIC_MEMORY
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
   bool need_scrub);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
 unsigned int memflags);
-#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
-- 
2.25.1




[PATCH v7 3/9] xen: update SUPPORT.md for static allocation

2022-06-19 Thread Penny Zheng
SUPPORT.md doesn't explicitly say whether static memory allocation is
supported, so this commit updates SUPPORT.md to add the static
allocation feature as Tech Preview for now.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v7 changes:
- no change
---
v6 changes:
- use domain instead of sub-systems
---
v5 changes:
- new commit
---
 SUPPORT.md | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/SUPPORT.md b/SUPPORT.md
index 70e98964cb..8e040d1c1e 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -286,6 +286,13 @@ to boot with memory < maxmem.
 
 Status, x86 HVM: Supported
 
+### Static Allocation
+
+Static allocation refers to domains for which memory areas are
+pre-defined by configuration using physical address ranges.
+
+Status, ARM: Tech Preview
+
 ### Memory Sharing
 
 Allow sharing of identical pages between guests
-- 
2.25.1




RE: [PATCH v4 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-06-17 Thread Penny Zheng
Hi Jan

Sorry about the late reply, I got sidetracked for a few weeks.

> -Original Message-
> From: Jan Beulich 
> Sent: Wednesday, May 18, 2022 2:36 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v4 2/8] xen/arm: allocate static shared memory to the
> default owner dom_io
> 
> On 18.05.2022 05:14, Penny Zheng wrote:
> > Hi Jan
> >
> >> -Original Message-
> >> From: Jan Beulich 
> >> Sent: Wednesday, May 18, 2022 12:01 AM
> >> To: Penny Zheng 
> >> Cc: Wei Chen ; Stefano Stabellini
> >> ; Julien Grall ; Bertrand
> >> Marquis ; Volodymyr Babchuk
> >> ; Andrew Cooper
> >> ; George Dunlap
> >> ; Wei Liu ;
> >> xen-devel@lists.xenproject.org
> >> Subject: Re: [PATCH v4 2/8] xen/arm: allocate static shared memory to
> >> the default owner dom_io
> >>
> >> On 17.05.2022 11:05, Penny Zheng wrote:
> >>> --- a/xen/common/domain.c
> >>> +++ b/xen/common/domain.c
> >>> @@ -780,6 +780,11 @@ void __init setup_system_domains(void)
> >>>   * This domain owns I/O pages that are within the range of the
> page_info
> >>>   * array. Mappings occur at the priv of the caller.
> >>>   * Quarantined PCI devices will be associated with this domain.
> >>> + *
> >>> + * DOMID_IO could also be used for mapping memory when no explicit
> >>> + * domain is specified.
> >>> + * For instance, DOMID_IO is the owner of memory pre-shared among
> >>> + * multiple domains at boot time, when no explicit owner is 
> >>> specified.
> >>>   */
> >>>  dom_io = domain_create(DOMID_IO, NULL, 0);
> >>>  if ( IS_ERR(dom_io) )
> >>
> >> I'm sorry: The comment change is definitely better now than it was,
> >> but it is still written in a way requiring further knowledge to
> >> understand what it talks about. Without further context, "when no
> >> explicit domain is specified" only raises questions. I would have
> >> tried to make a suggestion, but I can't really figure what it is that you 
> >> want
> to get across here.
> >
> > How about I only retain the "For instance, xxx" and make it more in details.
> > "
> > DOMID_IO is also the default owner of memory pre-shared among multiple
> > domains at boot time, when no explicit owner is specified with "owner"
> > property in static shared memory device node. See section
> > docs/misc/arm/device-tree/booting.txt: Static Shared Memory for more
> details.
> > "
> 
> This reads quite a bit better. Yet I continue to be puzzled about the apparent
> conflict of "pre-shared" and "no explicit owner": How can memory be (pre-
> )shared when the owner isn't known? Shouldn't all memory have an owner?
> Or alternatively if this sharing model doesn't require ownership, shouldn't 
> all
> shared memory be owned by DomIO? In any event, to leave such details out of
> here, perhaps the comment could consist of just the first part of what you
> wrote, ending at where the first comma is?
> 

We had a short discussion about the memory ownership on my design link
(https://lore.kernel.org/all/a50d9fde-1d06-7cda-2779-9eea9e1c0...@xen.org/T/);
we have use cases for both scenarios.

Ok, I will modify the comment and only keep
"
DOMID_IO is also the default owner of memory pre-shared among multiple
domains at boot time.
"
 
> Jan



RE: [PATCH v6 2/9] xen: do not free reserved memory into heap

2022-06-12 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Thursday, June 9, 2022 5:22 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Wei Liu 
> Subject: Re: [PATCH v6 2/9] xen: do not free reserved memory into heap
> 
> Hi,
> 
> On 09/06/2022 06:54, Penny Zheng wrote:
> >
> >
> >> -Original Message-
> >> From: Julien Grall 
> >> Sent: Tuesday, June 7, 2022 5:13 PM
> >> To: Penny Zheng ; xen-devel@lists.xenproject.org
> >> Cc: Wei Chen ; Stefano Stabellini
> >> ; Bertrand Marquis
> >> ; Volodymyr Babchuk
> >> ; Andrew Cooper
> >> ; George Dunlap
> >> ; Jan Beulich ; Wei Liu
> >> 
> >> Subject: Re: [PATCH v6 2/9] xen: do not free reserved memory into
> >> heap
> >>
> >> Hi Penny,
> >>
> >
> > Hi Julien
> >
> >> On 07/06/2022 08:30, Penny Zheng wrote:
> >>> Pages used as guest RAM for static domain, shall be reserved to this
> >>> domain only.
> >>> So in case reserved pages being used for other purpose, users shall
> >>> not free them back to heap, even when last ref gets dropped.
> >>>
> >>> free_staticmem_pages will be called by free_heap_pages in runtime
> >>> for static domain freeing memory resource, so let's drop the __init flag.
> >>>
> >>> Signed-off-by: Penny Zheng 
> >>> ---
> >>> v6 changes:
> >>> - adapt to PGC_static
> >>> - remove #ifdef aroud function declaration
> >>> ---
> >>> v5 changes:
> >>> - In order to avoid stub functions, we #define PGC_staticmem to
> >>> non-zero only when CONFIG_STATIC_MEMORY
> >>> - use "unlikely()" around pg->count_info & PGC_staticmem
> >>> - remove pointless "if", since mark_page_free() is going to set
> >>> count_info to PGC_state_free and by consequence clear PGC_staticmem
> >>> - move #define PGC_staticmem 0 to mm.h
> >>> ---
> >>> v4 changes:
> >>> - no changes
> >>> ---
> >>> v3 changes:
> >>> - fix possible racy issue in free_staticmem_pages()
> >>> - introduce a stub free_staticmem_pages() for the
> >>> !CONFIG_STATIC_MEMORY case
> >>> - move the change to free_heap_pages() to cover other potential call
> >>> sites
> >>> - fix the indentation
> >>> ---
> >>> v2 changes:
> >>> - new commit
> >>> ---
> >>>xen/arch/arm/include/asm/mm.h |  4 +++-
> >>>xen/common/page_alloc.c   | 12 +---
> >>>xen/include/xen/mm.h  |  2 --
> >>>3 files changed, 12 insertions(+), 6 deletions(-)
> >>>
> >>> diff --git a/xen/arch/arm/include/asm/mm.h
> >>> b/xen/arch/arm/include/asm/mm.h index fbff11c468..7442893e77 100644
> >>> --- a/xen/arch/arm/include/asm/mm.h
> >>> +++ b/xen/arch/arm/include/asm/mm.h
> >>> @@ -108,9 +108,11 @@ struct page_info
> >>>  /* Page is Xen heap? */
> >>>#define _PGC_xen_heap PG_shift(2)
> >>>#define PGC_xen_heap  PG_mask(1, 2)
> >>> -  /* Page is static memory */
> >>
> >> NITpicking: You added this comment in patch #1 and now removing the
> space.
> >> Any reason to drop the space?
> >>
> >>> +#ifdef CONFIG_STATIC_MEMORY
> >>
> >> I think this change ought to be explained in the commit message.
> >> AFAIU, this is necessary to allow the compiler to remove code and
> >> avoid linking issues. Is that correct?
> >>
> >>> +/* Page is static memory */
> >>>#define _PGC_staticPG_shift(3)
> >>>#define PGC_static PG_mask(1, 3)
> >>> +#endif
> >>>/* ... */
> >>>/* Page is broken? */
> >>>#define _PGC_broken   PG_shift(7)
> >>> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> >>> 9e5c757847..6876869fa6 100644
> >>> --- a/xen/common/page_alloc.c
> >>> +++ b/xen/common/page_alloc.c
> >>> @@ -1443,6 +1443,13 @@ static void free_heap_pages(
> >>>
> >>>ASSERT(order <= MAX_ORDER);
> >>>
> >>> +if ( unlikely(pg->count_info & PGC_static) )
> &g

RE: [PATCH v6 9/9] xen: retrieve reserved pages on populate_physmap

2022-06-08 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Tuesday, June 7, 2022 3:58 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Julien Grall ; Stefano Stabellini ; 
> Wei
> Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v6 9/9] xen: retrieve reserved pages on populate_physmap
> 
> On 07.06.2022 09:30, Penny Zheng wrote:
> > +/*
> > + * Acquire a page from reserved page list(resv_page_list), when
> > +populating
> > + * memory for static domain on runtime.
> > + */
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +struct page_info *page;
> > +
> > +spin_lock(&d->page_alloc_lock);
> > +/* Acquire a page from reserved page list(resv_page_list). */
> > +page = page_list_remove_head(&d->resv_page_list);
> > +spin_unlock(&d->page_alloc_lock);
> 
> With page removal done under lock, ...
> 
> > +if ( unlikely(!page) )
> > +return INVALID_MFN;
> > +
> > +if ( !prepare_staticmem_pages(page, 1, memflags) )
> > +goto fail;
> > +
> > +if ( assign_domstatic_pages(d, page, 1, memflags) )
> > +goto fail;
> > +
> > +return page_to_mfn(page);
> > +
> > + fail:
> > +page_list_add_tail(page, &d->resv_page_list);
> > +return INVALID_MFN;
> 
> ... doesn't re-adding the page to the list also need to be done with the lock
> held?

True, sorry about that.
Like I said in another thread with Julien, I'll add the missing half:
"
For the freeing part, I shall take the lock at arch_free_heap_page(),
where we insert the page into the rsv_page_list, and release the lock at the
end of free_staticmem_pages.
"

> 
> Jan



RE: [PATCH v6 2/9] xen: do not free reserved memory into heap

2022-06-08 Thread Penny Zheng


> -Original Message-
> From: Julien Grall 
> Sent: Tuesday, June 7, 2022 5:13 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Wei Liu 
> Subject: Re: [PATCH v6 2/9] xen: do not free reserved memory into heap
> 
> Hi Penny,
> 

Hi Julien

> On 07/06/2022 08:30, Penny Zheng wrote:
> > Pages used as guest RAM for static domain, shall be reserved to this
> > domain only.
> > So in case reserved pages being used for other purpose, users shall
> > not free them back to heap, even when last ref gets dropped.
> >
> > free_staticmem_pages will be called by free_heap_pages in runtime for
> > static domain freeing memory resource, so let's drop the __init flag.
> >
> > Signed-off-by: Penny Zheng 
> > ---
> > v6 changes:
> > - adapt to PGC_static
> > - remove #ifdef aroud function declaration
> > ---
> > v5 changes:
> > - In order to avoid stub functions, we #define PGC_staticmem to
> > non-zero only when CONFIG_STATIC_MEMORY
> > - use "unlikely()" around pg->count_info & PGC_staticmem
> > - remove pointless "if", since mark_page_free() is going to set
> > count_info to PGC_state_free and by consequence clear PGC_staticmem
> > - move #define PGC_staticmem 0 to mm.h
> > ---
> > v4 changes:
> > - no changes
> > ---
> > v3 changes:
> > - fix possible racy issue in free_staticmem_pages()
> > - introduce a stub free_staticmem_pages() for the
> > !CONFIG_STATIC_MEMORY case
> > - move the change to free_heap_pages() to cover other potential call
> > sites
> > - fix the indentation
> > ---
> > v2 changes:
> > - new commit
> > ---
> >   xen/arch/arm/include/asm/mm.h |  4 +++-
> >   xen/common/page_alloc.c   | 12 +---
> >   xen/include/xen/mm.h  |  2 --
> >   3 files changed, 12 insertions(+), 6 deletions(-)
> >
> > diff --git a/xen/arch/arm/include/asm/mm.h
> > b/xen/arch/arm/include/asm/mm.h index fbff11c468..7442893e77 100644
> > --- a/xen/arch/arm/include/asm/mm.h
> > +++ b/xen/arch/arm/include/asm/mm.h
> > @@ -108,9 +108,11 @@ struct page_info
> > /* Page is Xen heap? */
> >   #define _PGC_xen_heap PG_shift(2)
> >   #define PGC_xen_heap  PG_mask(1, 2)
> > -  /* Page is static memory */
> 
> NITpicking: You added this comment in patch #1 and now removing the space.
> Any reason to drop the space?
> 
> > +#ifdef CONFIG_STATIC_MEMORY
> 
> I think this change ought to be explained in the commit message. AFAIU, this 
> is
> necessary to allow the compiler to remove code and avoid linking issues. Is
> that correct?
> 
> > +/* Page is static memory */
> >   #define _PGC_staticPG_shift(3)
> >   #define PGC_static PG_mask(1, 3)
> > +#endif
> >   /* ... */
> >   /* Page is broken? */
> >   #define _PGC_broken   PG_shift(7)
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > 9e5c757847..6876869fa6 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -1443,6 +1443,13 @@ static void free_heap_pages(
> >
> >   ASSERT(order <= MAX_ORDER);
> >
> > +if ( unlikely(pg->count_info & PGC_static) )
> > +{
> > +/* Pages of static memory shall not go back to the heap. */
> > +free_staticmem_pages(pg, 1UL << order, need_scrub);
> I can't remember whether I asked this before (I couldn't find a thread).
> 
> free_staticmem_pages() doesn't seem to be protected by any lock. So how do
> you prevent the concurrent access to the page info with the acquire part?

True, last time you suggested that rsv_page_list needs to be protected with a
spinlock (most likely d->page_alloc_lock). I hadn't thought it through
thoroughly, sorry about that.
So for the freeing part, I shall take the lock at arch_free_heap_page(), where we
insert the page into the rsv_page_list, and release the lock at the end of
free_staticmem_pages.
And for the acquiring part, I've already put the lock around
page = page_list_remove_head(&d->resv_page_list);
And for acquiring part, I've already put the lock around 
page = page_list_remove_head(&d->resv_page_list);

> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v6 7/9] xen/arm: unpopulate memory when domain is static

2022-06-08 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Tuesday, June 7, 2022 5:20 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Wei Liu 
> Subject: Re: [PATCH v6 7/9] xen/arm: unpopulate memory when domain is
> static
> 
> Hi Penny,
> 
> On 07/06/2022 08:30, Penny Zheng wrote:
> > Today when a domain unpopulates the memory on runtime, they will
> > always hand the memory back to the heap allocator. And it will be a
> > problem if domain is static.
> >
> > Pages as guest RAM for static domain shall be reserved to only this
> > domain and not be used for any other purposes, so they shall never go
> > back to heap allocator.
> >
> > This commit puts reserved pages on the new list resv_page_list only
> > after having taken them off the "normal" list, when the last ref dropped.
> >
> > Signed-off-by: Penny Zheng 
> > Acked-by: Jan Beulich 
> > ---
> > v6 changes:
> > - refine in-code comment
> > - move PGC_static !CONFIG_STATIC_MEMORY definition to common header
> 
> I don't understand why this change is necessary for this patch. AFAICT, all 
> the
> users of PGC_static will be protected by #ifdef CONFIG_STATIC_MEMORY and
> therefore PGC_static should always be defined.
> 

True, I noticed that arch_free_heap_page has already been guarded by
#ifdef CONFIG_STATIC_MEMORY. I'll revert the change.

> Cheers,
> 
> --
> Julien Grall


[PATCH v6 8/9] xen: introduce prepare_staticmem_pages

2022-06-07 Thread Penny Zheng
Later, we want to use acquire_domstatic_pages() for populating memory
for a static domain at runtime; however, it does a lot of pointless work
(checking mfn_valid(), scrubbing the free part, cleaning the cache...)
considering we know the page is valid and belongs to the guest.

This commit splits acquire_staticmem_pages() into two parts, and
introduces prepare_staticmem_pages to bypass all the "pointless work".

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 -
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 886b5d82a2..9004dd41c1 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,26 +2661,13 @@ void free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 }
 }
 
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+   unsigned long nr_mfns,
+   unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
 unsigned long i;
-struct page_info *pg;
-
-ASSERT(nr_mfns);
-for ( i = 0; i < nr_mfns; i++ )
-if ( !mfn_valid(mfn_add(smfn, i)) )
-return NULL;
-
-pg = mfn_to_page(smfn);
 
 spin_lock(&heap_lock);
 
@@ -2691,7 +2678,7 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
-   i, mfn_x(smfn) + i,
+   i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
 goto out_err;
 }
@@ -2715,6 +2702,38 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 if ( need_tlbflush )
 filtered_flush_tlb_mask(tlbflush_timestamp);
 
+return true;
+
+ out_err:
+while ( i-- )
+pg[i].count_info = PGC_static | PGC_state_free;
+
+spin_unlock(&heap_lock);
+
+return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+unsigned long i;
+struct page_info *pg;
+
+ASSERT(nr_mfns);
+for ( i = 0; i < nr_mfns; i++ )
+if ( !mfn_valid(mfn_add(smfn, i)) )
+return NULL;
+
+pg = mfn_to_page(smfn);
+if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+return NULL;
+
 /*
  * Ensure cache and RAM are consistent for platforms where the guest
  * can control its own visibility of/through the cache.
@@ -2723,14 +2742,6 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
 
 return pg;
-
- out_err:
-while ( i-- )
-pg[i].count_info = PGC_static | PGC_state_free;
-
-spin_unlock(&heap_lock);
-
-return NULL;
 }
 
 /*
-- 
2.25.1




[PATCH v6 7/9] xen/arm: unpopulate memory when domain is static

2022-06-07 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, the pages are always
handed back to the heap allocator. This is a problem if the domain
is static.

Pages used as guest RAM for a static domain shall be reserved to only this
domain and not be used for any other purpose, so they shall never go back to
the heap allocator.

This commit puts reserved pages on the new list resv_page_list, only after
having taken them off the "normal" list, when the last ref is dropped.

Signed-off-by: Penny Zheng 
Acked-by: Jan Beulich 
---
v6 changes:
- refine in-code comment
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
---
v5 changes:
- adapt this patch for PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
---
v2 changes:
- put reserved pages on resv_page_list after having taken them off
the "normal" list
---
 xen/arch/arm/include/asm/mm.h | 12 
 xen/common/domain.c   |  4 
 xen/common/page_alloc.c   |  4 
 xen/include/xen/mm.h  |  4 
 xen/include/xen/sched.h   |  3 +++
 5 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 7442893e77..2ce4d80796 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -360,6 +360,18 @@ void clear_and_clean_page(struct page_info *page);
 
 unsigned int arch_get_dma_bitsize(void);
 
+/*
+ * Put free pages on the resv page list after having taken them
+ * off the "normal" page list, when pages from static memory
+ */
+#ifdef CONFIG_STATIC_MEMORY
+#define arch_free_heap_page(d, pg) ({   \
+page_list_del(pg, page_to_list(d, pg)); \
+if ( (pg)->count_info & PGC_static )  \
+page_list_add_tail(pg, &(d)->resv_page_list);   \
+})
+#endif
+
 #endif /*  __ARCH_ARM_MM__ */
 /*
  * Local variables:
diff --git a/xen/common/domain.c b/xen/common/domain.c
index a3ef991bd1..a49574fa24 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -604,6 +604,10 @@ struct domain *domain_create(domid_t domid,
 INIT_PAGE_LIST_HEAD(&d->page_list);
 INIT_PAGE_LIST_HEAD(&d->extra_page_list);
 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
+#ifdef CONFIG_STATIC_MEMORY
+INIT_PAGE_LIST_HEAD(&d->resv_page_list);
+#endif
+
 
 spin_lock_init(&d->node_affinity_lock);
 d->node_affinity = NODE_MASK_ALL;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7fb28e2e07..886b5d82a2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,10 +151,6 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_static
-#define PGC_static 0
-#endif
-
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 1c4ddb336b..e80b4bdcde 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -210,6 +210,10 @@ extern struct domain *dom_cow;
 
 #include 
 
+#ifndef PGC_static
+#define PGC_static 0
+#endif
+
 static inline bool is_special_page(const struct page_info *page)
 {
 return is_xen_heap_page(page) || (page->count_info & PGC_extra);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5191853c18..bd2782b3c5 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -381,6 +381,9 @@ struct domain
 struct page_list_head page_list;  /* linked list */
 struct page_list_head extra_page_list; /* linked list (size extra_pages) */
 struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
+#ifdef CONFIG_STATIC_MEMORY
+struct page_list_head resv_page_list; /* linked list */
+#endif
 
 /*
  * This field should only be directly accessed by domain_adjust_tot_pages()
-- 
2.25.1




[PATCH v6 9/9] xen: retrieve reserved pages on populate_physmap

2022-06-07 Thread Penny Zheng
When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list to make sure that
guest RAM is still restricted to the statically configured memory regions.
This commit also introduces a new helper, acquire_reserved_page, to make this work.

Signed-off-by: Penny Zheng 
---
v6 changes:
- drop the lock before returning
---
v5 changes:
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c | 23 ++
 xen/common/page_alloc.c | 70 +++--
 xen/include/xen/mm.h|  1 +
 3 files changed, 77 insertions(+), 17 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
 
 mfn = _mfn(gpfn);
 }
+else if ( is_domain_using_staticmem(d) )
+{
+/*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+if ( a->extent_order != 0 )
+{
+gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for static 
%pd\n",
+ a->extent_order, d);
+goto out;
+}
+
+mfn = acquire_reserved_page(d, a->memflags);
+if ( mfn_eq(mfn, INVALID_MFN) )
+{
+gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+goto out;
+}
+}
 else
 {
 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9004dd41c1..57d28304df 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,9 +2661,8 @@ void free_staticmem_pages(struct page_info *pg, unsigned 
long nr_mfns,
 }
 }
 
-static bool __init prepare_staticmem_pages(struct page_info *pg,
-   unsigned long nr_mfns,
-   unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long 
nr_mfns,
+unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
@@ -2744,21 +2743,9 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 return pg;
 }
 
-/*
- * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
- * then assign them to one specific domain #d.
- */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
-   unsigned int nr_mfns, unsigned int memflags)
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+  unsigned int nr_mfns, unsigned int memflags)
 {
-struct page_info *pg;
-
-ASSERT(!in_irq());
-
-pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
-if ( !pg )
-return -ENOENT;
-
 if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
 {
 /*
@@ -2777,6 +2764,55 @@ int __init acquire_domstatic_pages(struct domain *d, 
mfn_t smfn,
 
 return 0;
 }
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+   unsigned int nr_mfns, unsigned int memflags)
+{
+struct page_info *pg;
+
+ASSERT(!in_irq());
+
+pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+if ( !pg )
+return -ENOENT;
+
+if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
+return -EINVAL;
+
+return 0;
+}
+
+/*
+ * Acquire a page from reserved page list(resv_page_list), when populating
+ * memory for static domain on run

[PATCH v6 6/9] xen/arm: introduce CDF_staticmem

2022-06-07 Thread Penny Zheng
In order to have an easy and quick way to find out whether a domain's memory
is statically configured, this commit introduces a new flag, CDF_staticmem, and
a new helper, is_domain_using_staticmem(), to query it.

Signed-off-by: Penny Zheng 
---
v6 changes:
- move non-zero is_domain_using_staticmem() from ARM header to common
header
---
v5 changes:
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v4 changes:
- no changes
---
v3 changes:
- change name from "is_domain_static()" to "is_domain_using_staticmem"
---
v2 changes:
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
---
 xen/arch/arm/domain_build.c | 5 -
 xen/include/xen/domain.h| 8 
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 7ddd16c26d..f6e2e44c1e 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -3287,9 +3287,12 @@ void __init create_domUs(void)
 if ( !dt_device_is_compatible(node, "xen,domain") )
 continue;
 
+if ( dt_find_property(node, "xen,static-mem", NULL) )
+flags |= CDF_staticmem;
+
 if ( dt_property_read_bool(node, "direct-map") )
 {
-if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !dt_find_property(node, 
"xen,static-mem", NULL) )
+if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !(flags & CDF_staticmem) 
)
 panic("direct-map is not valid for domain %s without static 
allocation.\n",
   dt_node_name(node));
 
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 1c3c88a14d..c847452414 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -34,6 +34,14 @@ void arch_get_domain_info(const struct domain *d,
 #ifdef CONFIG_ARM
 /* Should domain memory be directly mapped? */
 #define CDF_directmap(1U << 1)
+/* Is domain memory on static allocation? */
+#define CDF_staticmem(1U << 2)
+#endif
+
+#ifdef CONFIG_STATIC_MEMORY
+#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)
+#else
+#define is_domain_using_staticmem(d) ((void)(d), false)
 #endif
 
 /*
-- 
2.25.1




[PATCH v6 5/9] xen: add field "flags" to cover all internal CDF_XXX

2022-06-07 Thread Penny Zheng
With more and more CDF_xxx internal flags coming in, and to save space, this
commit introduces a new field "flags" in struct domain to store CDF_*
internal flags directly.

Another new CDF_xxx will be introduced in the next patch.

Signed-off-by: Penny Zheng 
Acked-by: Julien Grall 
---
v6 changes:
- no change
---
v5 changes:
- no change
---
v4 changes:
- no change
---
v3 changes:
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- fix coding style
---
 xen/arch/arm/domain.c | 2 --
 xen/arch/arm/include/asm/domain.h | 3 +--
 xen/common/domain.c   | 3 +++
 xen/include/xen/sched.h   | 3 +++
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 8110c1df86..74189d9878 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -709,8 +709,6 @@ int arch_domain_create(struct domain *d,
 ioreq_domain_init(d);
 #endif
 
-d->arch.directmap = flags & CDF_directmap;
-
 /* p2m_init relies on some value initialized by the IOMMU subsystem */
 if ( (rc = iommu_domain_init(d, config->iommu_opts)) != 0 )
 goto fail;
diff --git a/xen/arch/arm/include/asm/domain.h 
b/xen/arch/arm/include/asm/domain.h
index ed63c2b6f9..fe7a029ebf 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -29,7 +29,7 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
-#define is_domain_direct_mapped(d) (d)->arch.directmap
+#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
 /*
  * Is the domain using the host memory layout?
@@ -103,7 +103,6 @@ struct arch_domain
 void *tee;
 #endif
 
-bool directmap;
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 7570eae91a..a3ef991bd1 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -567,6 +567,9 @@ struct domain *domain_create(domid_t domid,
 /* Sort out our idea of is_system_domain(). */
 d->domain_id = domid;
 
+/* Holding CDF_* internal flags. */
+d->cdf = flags;
+
 /* Debug sanity. */
 ASSERT(is_system_domain(d) ? config == NULL : config != NULL);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 463d41ffb6..5191853c18 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -596,6 +596,9 @@ struct domain
 struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
 } ioreq_server;
 #endif
+
+/* Holding CDF_* constant. Internal flags for domain creation. */
+unsigned int cdf;
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.25.1




[PATCH v6 4/9] xen: do not merge reserved pages in free_heap_pages()

2022-06-07 Thread Penny Zheng
The code in free_heap_pages() will try to merge pages with the
successor/predecessor if the pages are suitably aligned. So if the reserved
pages are right next to the pages given to the heap allocator,
free_heap_pages() will merge them, and accidentally give the reserved pages
to the heap allocator as a result.

So in order to avoid the above scenario, this commit updates free_heap_pages()
to check whether the predecessor and/or successor has PGC_static set,
when trying to merge the about-to-be-freed chunk with the predecessor
and/or successor.

Suggested-by: Julien Grall 
Signed-off-by: Penny Zheng 
Reviewed-by: Jan Beulich 
Reviewed-by: Julien Grall 
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- change PGC_reserved to adapt to PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- no changes
---
v2 changes:
- new commit
---
 xen/common/page_alloc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 6876869fa6..7fb28e2e07 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1486,6 +1486,7 @@ static void free_heap_pages(
 /* Merge with predecessor block? */
 if ( !mfn_valid(page_to_mfn(predecessor)) ||
  !page_state_is(predecessor, free) ||
+ (predecessor->count_info & PGC_static) ||
  (PFN_ORDER(predecessor) != order) ||
  (phys_to_nid(page_to_maddr(predecessor)) != node) )
 break;
@@ -1509,6 +1510,7 @@ static void free_heap_pages(
 /* Merge with successor block? */
 if ( !mfn_valid(page_to_mfn(successor)) ||
  !page_state_is(successor, free) ||
+ (successor->count_info & PGC_static) ||
  (PFN_ORDER(successor) != order) ||
  (phys_to_nid(page_to_maddr(successor)) != node) )
 break;
-- 
2.25.1




[PATCH v6 3/9] xen: update SUPPORT.md for static allocation

2022-06-07 Thread Penny Zheng
SUPPORT.md doesn't explicitly say whether static memory allocation is
supported, so this commit updates SUPPORT.md to add the static
allocation feature as Tech Preview for now.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v6 changes:
- use domain instead of sub-systems
---
v5 changes:
- new commit
---
 SUPPORT.md | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/SUPPORT.md b/SUPPORT.md
index ee2cd319e2..f50bc3a0fd 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -278,6 +278,13 @@ to boot with memory < maxmem.
 
 Status, x86 HVM: Supported
 
+### Static Allocation
+
+Static allocation refers to domains for which memory areas are
+pre-defined by configuration using physical address ranges.
+
+Status, ARM: Tech Preview
+
 ### Memory Sharing
 
 Allow sharing of identical pages between guests
-- 
2.25.1




[PATCH v6 1/9] xen/arm: rename PGC_reserved to PGC_static

2022-06-07 Thread Penny Zheng
PGC_reserved could be ambiguous, as it does not tell what the pages are
reserved for, so this commit renames PGC_reserved to
PGC_static, which clearly indicates that the page is reserved for static
memory.

Signed-off-by: Penny Zheng 
---
v6 changes:
- rename PGC_staticmem to PGC_static
---
v5 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++---
 xen/common/page_alloc.c   | 22 +++---
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 424aaf2823..fbff11c468 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,9 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is reserved */
-#define _PGC_reserved PG_shift(3)
-#define PGC_reserved  PG_mask(1, 3)
+  /* Page is static memory */
+#define _PGC_staticPG_shift(3)
+#define PGC_static PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 319029140f..9e5c757847 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,8 +151,8 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_reserved
-#define PGC_reserved 0
+#ifndef PGC_static
+#define PGC_static 0
 #endif
 
 /*
@@ -2286,7 +2286,7 @@ int assign_pages(
 
 for ( i = 0; i < nr; i++ )
 {
-ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
 if ( pg[i].count_info & PGC_extra )
 extra_pages++;
 }
@@ -2346,7 +2346,7 @@ int assign_pages(
 page_set_owner(&pg[i], d);
 smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
 pg[i].count_info =
-(pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 
1;
+(pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
 
 page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
 }
@@ -2652,8 +2652,8 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_reserved. */
-pg[i].count_info |= PGC_reserved;
+/* In case initializing page of static memory, mark it PGC_static. */
+pg[i].count_info |= PGC_static;
 }
 }
 
@@ -2682,8 +2682,8 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
 for ( i = 0; i < nr_mfns; i++ )
 {
-/* The page should be reserved and not yet allocated. */
-if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+/* The page should be static and not yet allocated. */
+if ( pg[i].count_info != (PGC_state_free | PGC_static) )
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
@@ -2697,10 +2697,10 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 &tlbflush_timestamp);
 
 /*
- * Preserve flag PGC_reserved and change page state
+ * Preserve flag PGC_static and change page state
  * to PGC_state_inuse.
  */
-pg[i].count_info = PGC_reserved | PGC_state_inuse;
+pg[i].count_info = PGC_static | PGC_state_inuse;
 /* Initialise fields which have other uses for free pages. */
 pg[i].u.inuse.type_info = 0;
 page_set_owner(&pg[i], NULL);
@@ -2722,7 +2722,7 @@ static struct page_info * __init 
acquire_staticmem_pages(mfn_t smfn,
 
  out_err:
 while ( i-- )
-pg[i].count_info = PGC_reserved | PGC_state_free;
+pg[i].count_info = PGC_static | PGC_state_free;
 
 spin_unlock(&heap_lock);
 
-- 
2.25.1




[PATCH v6 2/9] xen: do not free reserved memory into heap

2022-06-07 Thread Penny Zheng
Pages used as guest RAM for a static domain shall be reserved to this
domain only.
So in case reserved pages are used for another purpose, users
shall not free them back to the heap, even when the last ref gets dropped.

free_staticmem_pages will be called by free_heap_pages at runtime
when a static domain frees memory, so let's drop the __init
flag.

Signed-off-by: Penny Zheng 
---
v6 changes:
- adapt to PGC_static
- remove #ifdef around function declaration
---
v5 changes:
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- fix the indentation
---
v2 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  4 +++-
 xen/common/page_alloc.c   | 12 +---
 xen/include/xen/mm.h  |  2 --
 3 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index fbff11c468..7442893e77 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,11 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
 #define _PGC_staticPG_shift(3)
 #define PGC_static PG_mask(1, 3)
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9e5c757847..6876869fa6 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1443,6 +1443,13 @@ static void free_heap_pages(
 
 ASSERT(order <= MAX_ORDER);
 
+if ( unlikely(pg->count_info & PGC_static) )
+{
+/* Pages of static memory shall not go back to the heap. */
+free_staticmem_pages(pg, 1UL << order, need_scrub);
+return;
+}
+
 spin_lock(&heap_lock);
 
 for ( i = 0; i < (1 << order); i++ )
@@ -2636,8 +2643,8 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+  bool need_scrub)
 {
 mfn_t mfn = page_to_mfn(pg);
 unsigned long i;
@@ -2652,7 +2659,6 @@ void __init free_staticmem_pages(struct page_info *pg, 
unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_static. */
 pg[i].count_info |= PGC_static;
 }
 }
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 3be754da92..1c4ddb336b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,13 +85,11 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
-#ifdef CONFIG_STATIC_MEMORY
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
   bool need_scrub);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
 unsigned int memflags);
-#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
-- 
2.25.1




[PATCH v6 0/9] populate/unpopulate memory when domain on static allocation

2022-06-07 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, the pages are always
handed over to the heap allocator. This is a problem if the domain is a
static domain.
Pages used as guest RAM for a static domain shall always be reserved to this
domain only, and not be used for any other purpose, so they shall never go
back to the heap allocator.

This patch series intends to fix this issue by adding pages to the new list
resv_page_list, after having taken them off the "normal" list, when unpopulating
memory, and by retrieving pages from the reserved page list (resv_page_list)
when populating memory.

---
v6 changes:
- rename PGC_staticmem to PGC_static
- remove #ifdef around function declaration
- use domain instead of sub-systems
- move non-zero is_domain_using_staticmem() from ARM header to common
header
- move PGC_static !CONFIG_STATIC_MEMORY definition to common header
- drop the lock before returning
---
v5 changes:
- introduce three new commits
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- commit message refinement
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
- change name from "is_domain_static()" to "is_domain_using_staticmem"
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
- move #ifndef is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
- put reserved pages on resv_page_list after having taken them off
the "normal" list
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
- fix coding style

Penny Zheng (9):
  xen/arm: rename PGC_reserved to PGC_static
  xen: do not free reserved memory into heap
  xen: update SUPPORT.md for static allocation
  xen: do not merge reserved pages in free_heap_pages()
  xen: add field "flags" to cover all internal CDF_XXX
  xen/arm: introduce CDF_staticmem
  xen/arm: unpopulate memory when domain is static
  xen: introduce prepare_staticmem_pages
  xen: retrieve reserved pages on populate_physmap

 SUPPORT.md|   7 ++
 xen/arch/arm/domain.c |   2 -
 xen/arch/arm/domain_build.c   |   5 +-
 xen/arch/arm/include/asm/domain.h |   3 +-
 xen/arch/arm/include/asm/mm.h |  20 +++-
 xen/common/domain.c   |   7 ++
 xen/common/memory.c   |  23 +
 xen/common/page_alloc.c   | 149 --
 xen/include/xen/domain.h  |   8 ++
 xen/include/xen/mm.h  |   7 +-
 xen/include/xen/sched.h   |   6 ++
 11 files changed, 178 insertions(+), 59 deletions(-)

-- 
2.25.1




RE: [PATCH v5 6/9] xen/arm: introduce CDF_staticmem

2022-06-02 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Tuesday, May 31, 2022 4:41 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v5 6/9] xen/arm: introduce CDF_staticmem
> 
> On 31.05.2022 05:12, Penny Zheng wrote:
> > --- a/xen/arch/arm/include/asm/domain.h
> > +++ b/xen/arch/arm/include/asm/domain.h
> > @@ -31,6 +31,10 @@ enum domain_type {
> >
> >  #define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
> >
> > +#ifdef CONFIG_STATIC_MEMORY
> > +#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)
> > +#endif
> 
> Why is this in the Arm header, rather than ...
> 
> > --- a/xen/include/xen/domain.h
> > +++ b/xen/include/xen/domain.h
> > @@ -34,6 +34,12 @@ void arch_get_domain_info(const struct domain *d,
> > #ifdef CONFIG_ARM
> >  /* Should domain memory be directly mapped? */
> >  #define CDF_directmap(1U << 1)
> > +/* Is domain memory on static allocation? */
> > +#define CDF_staticmem(1U << 2)
> > +#endif
> > +
> > +#ifndef is_domain_using_staticmem
> > +#define is_domain_using_staticmem(d) ((void)(d), false)
> >  #endif
> 
> ... here (with what you have here now simply becoming the #else part)?
> Once living here, I expect it also can be an inline function rather than a 
> macro,
> with the #ifdef merely inside its body.
> 

In order to avoid bringing a chicken-and-egg problem into xen/include/xen/domain.h,
I may need to move the static inline function to xen/include/xen/sched.h (which
already includes the domain.h header).
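
A minimal sketch of the inline-function form being discussed, which would then
live where struct domain is fully defined (e.g. xen/include/xen/sched.h); the
exact placement and shape here are assumptions, not the final patch:

static inline bool is_domain_using_staticmem(const struct domain *d)
{
#ifdef CONFIG_STATIC_MEMORY
    return d->cdf & CDF_staticmem;
#else
    return false;
#endif
}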

> Jan



RE: [PATCH v5 2/9] xen: do not free reserved memory into heap

2022-06-01 Thread Penny Zheng


> -Original Message-
> From: Jan Beulich 
> Sent: Tuesday, May 31, 2022 4:37 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v5 2/9] xen: do not free reserved memory into heap
> 
> On 31.05.2022 05:12, Penny Zheng wrote:
> > Pages used as guest RAM for static domain, shall be reserved to this
> > domain only.
> > So in case reserved pages being used for other purpose, users shall
> > not free them back to heap, even when last ref gets dropped.
> >
> > free_staticmem_pages will be called by free_heap_pages in runtime for
> > static domain freeing memory resource, so let's drop the __init flag.
> >
> > Signed-off-by: Penny Zheng 
> > ---
> > v5 changes:
> > - In order to avoid stub functions, we #define PGC_staticmem to
> > non-zero only when CONFIG_STATIC_MEMORY
> > - use "unlikely()" around pg->count_info & PGC_staticmem
> > - remove pointless "if", since mark_page_free() is going to set
> > count_info to PGC_state_free and by consequence clear PGC_staticmem
> > - move #define PGC_staticmem 0 to mm.h
> > ---
> > v4 changes:
> > - no changes
> > ---
> > v3 changes:
> > - fix possible racy issue in free_staticmem_pages()
> > - introduce a stub free_staticmem_pages() for the
> > !CONFIG_STATIC_MEMORY case
> > - move the change to free_heap_pages() to cover other potential call
> > sites
> > - fix the indentation
> > ---
> > v2 changes:
> > - new commit
> > ---
> >  xen/arch/arm/include/asm/mm.h |  2 ++
> >  xen/common/page_alloc.c   | 16 +---
> >  xen/include/xen/mm.h  |  6 +-
> >  3 files changed, 16 insertions(+), 8 deletions(-)
> >
> > diff --git a/xen/arch/arm/include/asm/mm.h
> > b/xen/arch/arm/include/asm/mm.h index 1226700085..56d0939318 100644
> > --- a/xen/arch/arm/include/asm/mm.h
> > +++ b/xen/arch/arm/include/asm/mm.h
> > @@ -108,9 +108,11 @@ struct page_info
> >/* Page is Xen heap? */
> >  #define _PGC_xen_heap PG_shift(2)
> >  #define PGC_xen_heap  PG_mask(1, 2)
> > +#ifdef CONFIG_STATIC_MEMORY
> >/* Page is static memory */
> >  #define _PGC_staticmemPG_shift(3)
> >  #define PGC_staticmem PG_mask(1, 3)
> > +#endif
> >  /* ... */
> >  /* Page is broken? */
> >  #define _PGC_broken   PG_shift(7)
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > 44600dd9cd..6425761116 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -151,10 +151,6 @@
> >  #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
> > #endif
> >
> > -#ifndef PGC_staticmem
> > -#define PGC_staticmem 0
> > -#endif
> > -
> 
> Is the moving of this into the header really a necessary part of this change?
> Afaics the symbol is still only ever used in this one C file.

Later, in the commit "xen/arm: unpopulate memory when domain is static",
this flag will also be used in xen/arch/arm/include/asm/mm.h.

> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -85,10 +85,10 @@ bool scrub_free_pages(void);  } while ( false )
> > #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
> >
> > -#ifdef CONFIG_STATIC_MEMORY
> >  /* These functions are for static memory */  void
> > free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> >bool need_scrub);
> > +#ifdef CONFIG_STATIC_MEMORY
> >  int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int
> nr_mfns,
> >  unsigned int memflags);  #endif
> 
> Is the #ifdef really worth retaining at this point? Code is generally better
> readable without.
> 

Sure, will remove

> Jan



RE: [PATCH v5 1/9] xen/arm: rename PGC_reserved to PGC_staticmem

2022-06-01 Thread Penny Zheng
Hi Jan

> -Original Message-
> From: Jan Beulich 
> Sent: Tuesday, May 31, 2022 4:33 PM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v5 1/9] xen/arm: rename PGC_reserved to PGC_staticmem
> 
> On 31.05.2022 05:12, Penny Zheng wrote:
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -151,8 +151,8 @@
> >  #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
> > #endif
> >
> > -#ifndef PGC_reserved
> > -#define PGC_reserved 0
> > +#ifndef PGC_staticmem
> > +#define PGC_staticmem 0
> >  #endif
> 
> Just wondering: Is the "mem" part of the name really significant? Pages always
> represent memory of some form, don't they?
> 

Sure, it seems redundant, I'll rename to PGC_static.

> Jan



[PATCH v5 9/9] xen: retrieve reserved pages on populate_physmap

2022-05-30 Thread Penny Zheng
When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list to make sure that
guest RAM stays restricted to the statically configured memory regions.
This commit introduces a new helper, acquire_reserved_page, to make that work.

Signed-off-by: Penny Zheng 
---
v5 changes:
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c | 23 ++
 xen/common/page_alloc.c | 70 +++--
 xen/include/xen/mm.h|  1 +
 3 files changed, 77 insertions(+), 17 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
 
 mfn = _mfn(gpfn);
 }
+else if ( is_domain_using_staticmem(d) )
+{
+/*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+if ( a->extent_order != 0 )
+{
+gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for static 
%pd\n",
+ a->extent_order, d);
+goto out;
+}
+
+mfn = acquire_reserved_page(d, a->memflags);
+if ( mfn_eq(mfn, INVALID_MFN) )
+{
+gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+goto out;
+}
+}
 else
 {
 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bdd2e62865..9448552bab 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,9 +2661,8 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 }
 }
 
-static bool __init prepare_staticmem_pages(struct page_info *pg,
-   unsigned long nr_mfns,
-   unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
@@ -2744,21 +2743,9 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 return pg;
 }
 
-/*
- * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
- * then assign them to one specific domain #d.
- */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
-   unsigned int nr_mfns, unsigned int memflags)
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+  unsigned int nr_mfns, unsigned int memflags)
 {
-struct page_info *pg;
-
-ASSERT(!in_irq());
-
-pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
-if ( !pg )
-return -ENOENT;
-
 if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
 {
 /*
@@ -2777,6 +2764,55 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 
 return 0;
 }
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+   unsigned int nr_mfns, unsigned int memflags)
+{
+struct page_info *pg;
+
+ASSERT(!in_irq());
+
+pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+if ( !pg )
+return -ENOENT;
+
+if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
+return -EINVAL;
+
+return 0;
+}
+
+/*
+ * Acquire a page from reserved page list(resv_page_list), when populating
+ * memory for static domain on runtime.
+ */
+mfn_t acquire_reserved_page(
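
(The archived diff is cut off here. For orientation only, a rough sketch of
what acquire_reserved_page() plausibly looks like, based on the commit message
and the helpers above; the list handling and error unwinding are assumptions,
not the actual patch content.)

/* Hypothetical sketch only. */
mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
{
    struct page_info *page;

    /* Acquire a page from the reserved page list (resv_page_list). */
    spin_lock(&d->page_alloc_lock);
    page = page_list_remove_head(&d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);
    if ( unlikely(!page) )
        return INVALID_MFN;

    if ( !prepare_staticmem_pages(page, 1, memflags) ||
         assign_domstatic_pages(d, page, 1, memflags) )
    {
        /* Add the page back to the reserved list (full unwinding elided). */
        spin_lock(&d->page_alloc_lock);
        page_list_add_tail(page, &d->resv_page_list);
        spin_unlock(&d->page_alloc_lock);
        return INVALID_MFN;
    }

    return page_to_mfn(page);
}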

[PATCH v5 6/9] xen/arm: introduce CDF_staticmem

2022-05-30 Thread Penny Zheng
In order to have an easy and quick way to find out whether a domain's memory
is statically configured, this commit introduces a new flag CDF_staticmem and
a new helper is_domain_using_staticmem() to check it.

Signed-off-by: Penny Zheng 
---
v5 changes:
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v4 changes:
- no changes
---
v3 changes:
- change name from "is_domain_static()" to "is_domain_using_staticmem"
---
v2 changes:
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
---
 xen/arch/arm/domain_build.c   | 5 -
 xen/arch/arm/include/asm/domain.h | 4 
 xen/include/xen/domain.h  | 6 ++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 7ddd16c26d..f6e2e44c1e 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -3287,9 +3287,12 @@ void __init create_domUs(void)
 if ( !dt_device_is_compatible(node, "xen,domain") )
 continue;
 
+if ( dt_find_property(node, "xen,static-mem", NULL) )
+flags |= CDF_staticmem;
+
 if ( dt_property_read_bool(node, "direct-map") )
 {
-if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !dt_find_property(node, "xen,static-mem", NULL) )
+if ( !IS_ENABLED(CONFIG_STATIC_MEMORY) || !(flags & CDF_staticmem) )
 panic("direct-map is not valid for domain %s without static allocation.\n",
   dt_node_name(node));
 
diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
index fe7a029ebf..6bb999aff0 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -31,6 +31,10 @@ enum domain_type {
 
 #define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
+#ifdef CONFIG_STATIC_MEMORY
+#define is_domain_using_staticmem(d) ((d)->cdf & CDF_staticmem)
+#endif
+
 /*
  * Is the domain using the host memory layout?
  *
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 1c3c88a14d..c613afa57e 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -34,6 +34,12 @@ void arch_get_domain_info(const struct domain *d,
 #ifdef CONFIG_ARM
 /* Should domain memory be directly mapped? */
 #define CDF_directmap(1U << 1)
+/* Is domain memory on static allocation? */
+#define CDF_staticmem(1U << 2)
+#endif
+
+#ifndef is_domain_using_staticmem
+#define is_domain_using_staticmem(d) ((void)(d), false)
 #endif
 
 /*
-- 
2.25.1




[PATCH v5 8/9] xen: introduce prepare_staticmem_pages

2022-05-30 Thread Penny Zheng
Later, we want to use acquire_domstatic_pages() for populating memory
for a static domain at runtime. However, it does a lot of pointless work
(checking mfn_valid(), scrubbing the free part, cleaning the cache...)
considering we know the page is valid and belongs to the guest.

This commit splits acquire_staticmem_pages() in two parts, and
introduces prepare_staticmem_pages to bypass all the "pointless work".
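
The resulting structure, roughly (a sketch of the split described above, not
actual patch content):

/*
 * Before: acquire_staticmem_pages() did everything: mfn_valid() checks,
 *         page state updates under the heap_lock, cache flush.
 *
 * After:  acquire_staticmem_pages()         (boot-time path)
 *           -> prepare_staticmem_pages()    (page state updates only)
 *
 *         prepare_staticmem_pages() can then be reused directly by the
 *         runtime path (acquire_reserved_page(), added later in the series),
 *         which skips the boot-only checks and flushes.
 */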

Signed-off-by: Penny Zheng 
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 -
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b1350fc238..bdd2e62865 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,26 +2661,13 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 }
 }
 
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+   unsigned long nr_mfns,
+   unsigned int memflags)
 {
 bool need_tlbflush = false;
 uint32_t tlbflush_timestamp = 0;
 unsigned long i;
-struct page_info *pg;
-
-ASSERT(nr_mfns);
-for ( i = 0; i < nr_mfns; i++ )
-if ( !mfn_valid(mfn_add(smfn, i)) )
-return NULL;
-
-pg = mfn_to_page(smfn);
 
 spin_lock(&heap_lock);
 
@@ -2691,7 +2678,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
-   i, mfn_x(smfn) + i,
+   i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
 goto out_err;
 }
@@ -2715,6 +2702,38 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 if ( need_tlbflush )
 filtered_flush_tlb_mask(tlbflush_timestamp);
 
+return true;
+
+ out_err:
+while ( i-- )
+pg[i].count_info = PGC_staticmem | PGC_state_free;
+
+spin_unlock(&heap_lock);
+
+return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+unsigned long i;
+struct page_info *pg;
+
+ASSERT(nr_mfns);
+for ( i = 0; i < nr_mfns; i++ )
+if ( !mfn_valid(mfn_add(smfn, i)) )
+return NULL;
+
+pg = mfn_to_page(smfn);
+if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+return NULL;
+
 /*
  * Ensure cache and RAM are consistent for platforms where the guest
  * can control its own visibility of/through the cache.
@@ -2723,14 +2742,6 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
 
 return pg;
-
- out_err:
-while ( i-- )
-pg[i].count_info = PGC_staticmem | PGC_state_free;
-
-spin_unlock(&heap_lock);
-
-return NULL;
 }
 
 /*
-- 
2.25.1




[PATCH v5 7/9] xen/arm: unpopulate memory when domain is static

2022-05-30 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, it will always
hand the memory back to the heap allocator. And that is a problem if the
domain is static.

Pages used as guest RAM for a static domain shall be reserved to only this
domain and not be used for any other purpose, so they shall never go back to
the heap allocator.

This commit puts reserved pages on the new list resv_page_list, only after
having taken them off the "normal" list, when the last ref is dropped.
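
For context, a simplified sketch (not actual patch content) of where the hook
added below fires in common code:

/*
 * free_domheap_pages(pg, order)          (with d->page_alloc_lock held)
 *   for each page:
 *     arch_free_heap_page(d, &pg[i]);
 *       - takes the page off the "normal" d->page_list
 *       - if PGC_staticmem is set, additionally puts it on d->resv_page_list
 */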

Signed-off-by: Penny Zheng 
---
v5 changes:
- adapt this patch for PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
---
v2 changes:
- put reserved pages on resv_page_list after having taken them off
the "normal" list
---
 xen/arch/arm/include/asm/mm.h | 12 
 xen/common/domain.c   |  4 
 xen/include/xen/sched.h   |  3 +++
 3 files changed, 19 insertions(+)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 56d0939318..ca384a3939 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -360,6 +360,18 @@ void clear_and_clean_page(struct page_info *page);
 
 unsigned int arch_get_dma_bitsize(void);
 
+/*
+ * Put free pages on the resv page list after having taken them
+ * off the "normal" page list, when pages from static memory
+ */
+#ifdef CONFIG_STATIC_MEMORY
+#define arch_free_heap_page(d, pg) ({   \
+page_list_del(pg, page_to_list(d, pg)); \
+if ( (pg)->count_info & PGC_staticmem )  \
+page_list_add_tail(pg, &(d)->resv_page_list);   \
+})
+#endif
+
 #endif /*  __ARCH_ARM_MM__ */
 /*
  * Local variables:
diff --git a/xen/common/domain.c b/xen/common/domain.c
index a3ef991bd1..a49574fa24 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -604,6 +604,10 @@ struct domain *domain_create(domid_t domid,
 INIT_PAGE_LIST_HEAD(&d->page_list);
 INIT_PAGE_LIST_HEAD(&d->extra_page_list);
 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
+#ifdef CONFIG_STATIC_MEMORY
+INIT_PAGE_LIST_HEAD(&d->resv_page_list);
+#endif
+
 
 spin_lock_init(&d->node_affinity_lock);
 d->node_affinity = NODE_MASK_ALL;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5191853c18..3e22c77333 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -381,6 +381,9 @@ struct domain
 struct page_list_head page_list;  /* linked list */
 struct page_list_head extra_page_list; /* linked list (size extra_pages) */
 struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
+#ifdef CONFIG_STATIC_MEMORY
+struct page_list_head resv_page_list; /* linked list (size resv_pages) */
+#endif
 
 /*
  * This field should only be directly accessed by domain_adjust_tot_pages()
-- 
2.25.1




[PATCH v5 5/9] xen: add field "flags" to cover all internal CDF_XXX

2022-05-30 Thread Penny Zheng
With more and more CDF_xxx internal flags in and to save the space, this
commit introduces a new field "flags" in struct domain to store CDF_*
internal flags directly.

Another new CDF_xxx will be introduced in the next patch.

Signed-off-by: Penny Zheng 
Acked-by: Julien Grall 
---
v5 changes:
- no change
---
v4 changes:
- no change
---
v3 changes:
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- fix coding style
---
 xen/arch/arm/domain.c | 2 --
 xen/arch/arm/include/asm/domain.h | 3 +--
 xen/common/domain.c   | 3 +++
 xen/include/xen/sched.h   | 3 +++
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 8110c1df86..74189d9878 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -709,8 +709,6 @@ int arch_domain_create(struct domain *d,
 ioreq_domain_init(d);
 #endif
 
-d->arch.directmap = flags & CDF_directmap;
-
 /* p2m_init relies on some value initialized by the IOMMU subsystem */
 if ( (rc = iommu_domain_init(d, config->iommu_opts)) != 0 )
 goto fail;
diff --git a/xen/arch/arm/include/asm/domain.h 
b/xen/arch/arm/include/asm/domain.h
index ed63c2b6f9..fe7a029ebf 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -29,7 +29,7 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
-#define is_domain_direct_mapped(d) (d)->arch.directmap
+#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)
 
 /*
  * Is the domain using the host memory layout?
@@ -103,7 +103,6 @@ struct arch_domain
 void *tee;
 #endif
 
-bool directmap;
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 7570eae91a..a3ef991bd1 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -567,6 +567,9 @@ struct domain *domain_create(domid_t domid,
 /* Sort out our idea of is_system_domain(). */
 d->domain_id = domid;
 
+/* Holding CDF_* internal flags. */
+d->cdf = flags;
+
 /* Debug sanity. */
 ASSERT(is_system_domain(d) ? config == NULL : config != NULL);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 463d41ffb6..5191853c18 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -596,6 +596,9 @@ struct domain
 struct ioreq_server *server[MAX_NR_IOREQ_SERVERS];
 } ioreq_server;
 #endif
+
+/* Holding CDF_* constant. Internal flags for domain creation. */
+unsigned int cdf;
 };
 
 static inline struct page_list_head *page_to_list(
-- 
2.25.1




[PATCH v5 0/9] populate/unpopulate memory when domain on static allocation

2022-05-30 Thread Penny Zheng
Today, when a domain unpopulates memory at runtime, it will always
hand the memory over to the heap allocator. And that is a problem if it
is a static domain.
Pages used as guest RAM for a static domain shall always be reserved to this
domain only, and not be used for any other purpose, so they shall never go
back to the heap allocator.

This patch series intends to fix this issue by adding pages to the new list
resv_page_list after having taken them off the "normal" list when unpopulating
memory, and by retrieving pages from the resv page list (resv_page_list) when
populating memory.
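
The resulting flow, in short (a conceptual sketch distilled from the patches
in this series, not actual code):

/*
 * Unpopulate (runtime, static domain), when the last ref is dropped:
 *   free_domheap_pages()
 *     -> arch_free_heap_page(d, pg)   takes pg off the "normal" page_list
 *        and, if PGC_staticmem is set, puts it on d->resv_page_list
 *
 * Populate (populate_physmap, static domain):
 *   acquire_reserved_page(d, memflags)
 *     -> takes a page off d->resv_page_list and assigns it back to d,
 *        so guest RAM stays within the statically configured regions
 */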

---
v5 changes:
- introduce three new commits
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
- extract common codes for assigning pages into a helper assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- Alloc/free of memory can happen concurrently. So access to rsv_page_list
needs to be protected with a spinlock
---
v4 changes:
- commit message refinement
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
- guard "is_domain_using_staticmem" under CONFIG_STATIC_MEMORY
- #define is_domain_using_staticmem zero if undefined
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- change fixed width type uint32_t to unsigned int
- change "flags" to a more descriptive name "cdf"
- change name from "is_domain_static()" to "is_domain_using_staticmem"
- have page_list_del() just once out of the if()
- remove resv_pages counter
- make arch_free_heap_page be an expression, not a compound statement.
- move #ifndef is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY-ary
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- let "flags" live in the struct domain. So other arch can take
advantage of it in the future
- change name from "is_domain_on_static_allocation" to "is_domain_static()"
- put reserved pages on resv_page_list after having taken them off
the "normal" list
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
- fix coding style

Penny Zheng (9):
  xen/arm: rename PGC_reserved to PGC_staticmem
  xen: do not free reserved memory into heap
  xen: update SUPPORT.md for static allocation
  xen: do not merge reserved pages in free_heap_pages()
  xen: add field "flags" to cover all internal CDF_XXX
  xen/arm: introduce CDF_staticmem
  xen/arm: unpopulate memory when domain is static
  xen: introduce prepare_staticmem_pages
  xen: retrieve reserved pages on populate_physmap

 SUPPORT.md|   7 ++
 xen/arch/arm/domain.c |   2 -
 xen/arch/arm/domain_build.c   |   5 +-
 xen/arch/arm/include/asm/domain.h |   7 +-
 xen/arch/arm/include/asm/mm.h |  20 +++-
 xen/common/domain.c   |   7 ++
 xen/common/memory.c   |  23 +
 xen/common/page_alloc.c   | 147 --
 xen/include/xen/domain.h  |   6 ++
 xen/include/xen/mm.h  |   7 +-
 xen/include/xen/sched.h   |   6 ++
 11 files changed, 180 insertions(+), 57 deletions(-)

-- 
2.25.1




[PATCH v5 4/9] xen: do not merge reserved pages in free_heap_pages()

2022-05-30 Thread Penny Zheng
The code in free_heap_pages() will try to merge pages with the
successor/predecessor if pages are suitably aligned. So if the reserved
pages are right next to the pages given to the heap allocator,
free_heap_pages() will merge them, and as a result accidentally give the
reserved pages to the heap allocator.

So in order to avoid the above scenario, this commit updates free_heap_pages()
to check whether the predecessor and/or successor has PGC_staticmem set,
when trying to merge the about-to-be-freed chunk with the predecessor
and/or successor.

Suggested-by: Julien Grall 
Signed-off-by: Penny Zheng 
Reviewed-by: Jan Beulich 
Reviewed-by: Julien Grall 
---
v5 changes:
- change PGC_reserved to adapt to PGC_staticmem
---
v4 changes:
- no changes
---
v3 changes:
- no changes
---
v2 changes:
- new commit
---
 xen/common/page_alloc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 6425761116..b1350fc238 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1482,6 +1482,7 @@ static void free_heap_pages(
 /* Merge with predecessor block? */
 if ( !mfn_valid(page_to_mfn(predecessor)) ||
  !page_state_is(predecessor, free) ||
+ (predecessor->count_info & PGC_staticmem) ||
  (PFN_ORDER(predecessor) != order) ||
  (phys_to_nid(page_to_maddr(predecessor)) != node) )
 break;
@@ -1505,6 +1506,7 @@ static void free_heap_pages(
 /* Merge with successor block? */
 if ( !mfn_valid(page_to_mfn(successor)) ||
  !page_state_is(successor, free) ||
+ (successor->count_info & PGC_staticmem) ||
  (PFN_ORDER(successor) != order) ||
  (phys_to_nid(page_to_maddr(successor)) != node) )
 break;
-- 
2.25.1




[PATCH v5 3/9] xen: update SUPPORT.md for static allocation

2022-05-30 Thread Penny Zheng
SUPPORT.md doesn't explicitly say whether static memory allocation is
supported, so this commit updates SUPPORT.md to add the static allocation
feature as Tech Preview for now.

Signed-off-by: Penny Zheng 
---
v5 changes:
- new commit
---
 SUPPORT.md | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/SUPPORT.md b/SUPPORT.md
index ee2cd319e2..5980a82c4b 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -278,6 +278,13 @@ to boot with memory < maxmem.
 
 Status, x86 HVM: Supported
 
+### Static Allocation
+
+Static allocation refers to system or sub-system(domains) for which memory
+areas are pre-defined by configuration using physical address ranges.
+
+Status, ARM: Tech Preview
+
 ### Memory Sharing
 
 Allow sharing of identical pages between guests
-- 
2.25.1




[PATCH v5 2/9] xen: do not free reserved memory into heap

2022-05-30 Thread Penny Zheng
Pages used as guest RAM for static domain, shall be reserved to this
domain only.
So in case reserved pages being used for other purpose, users
shall not free them back to heap, even when last ref gets dropped.

free_staticmem_pages will be called by free_heap_pages at runtime when
a static domain frees a memory resource, so let's drop the __init
flag.

Signed-off-by: Penny Zheng 
---
v5 changes:
- In order to avoid stub functions, we #define PGC_staticmem to non-zero only
when CONFIG_STATIC_MEMORY
- use "unlikely()" around pg->count_info & PGC_staticmem
- remove pointless "if", since mark_page_free() is going to set count_info
to PGC_state_free and by consequence clear PGC_staticmem
- move #define PGC_staticmem 0 to mm.h
---
v4 changes:
- no changes
---
v3 changes:
- fix possible racy issue in free_staticmem_pages()
- introduce a stub free_staticmem_pages() for the !CONFIG_STATIC_MEMORY case
- move the change to free_heap_pages() to cover other potential call sites
- fix the indentation
---
v2 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  2 ++
 xen/common/page_alloc.c   | 16 +---
 xen/include/xen/mm.h  |  6 +-
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 1226700085..56d0939318 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,11 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
+#ifdef CONFIG_STATIC_MEMORY
   /* Page is static memory */
 #define _PGC_staticmemPG_shift(3)
 #define PGC_staticmem PG_mask(1, 3)
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 44600dd9cd..6425761116 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,10 +151,6 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_staticmem
-#define PGC_staticmem 0
-#endif
-
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -1443,6 +1439,13 @@ static void free_heap_pages(
 
 ASSERT(order <= MAX_ORDER);
 
+if ( unlikely(pg->count_info & PGC_staticmem) )
+{
+/* Pages of static memory shall not go back to the heap. */
+free_staticmem_pages(pg, 1UL << order, need_scrub);
+return;
+}
+
 spin_lock(&heap_lock);
 
 for ( i = 0; i < (1 << order); i++ )
@@ -2636,8 +2639,8 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+  bool need_scrub)
 {
 mfn_t mfn = page_to_mfn(pg);
 unsigned long i;
@@ -2652,7 +2655,6 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_staticmem. */
 pg[i].count_info |= PGC_staticmem;
 }
 }
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 3be754da92..ca2c6f033e 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,10 +85,10 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
-#ifdef CONFIG_STATIC_MEMORY
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
   bool need_scrub);
+#ifdef CONFIG_STATIC_MEMORY
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
 unsigned int memflags);
 #endif
@@ -212,6 +212,10 @@ extern struct domain *dom_cow;
 
 #include 
 
+#ifndef PGC_staticmem
+#define PGC_staticmem 0
+#endif
+
 static inline bool is_special_page(const struct page_info *page)
 {
 return is_xen_heap_page(page) || (page->count_info & PGC_extra);
-- 
2.25.1




[PATCH v5 1/9] xen/arm: rename PGC_reserved to PGC_staticmem

2022-05-30 Thread Penny Zheng
PGC_reserved could be ambiguous, and we have to tell what the pages are
reserved for, so this commit intends to rename PGC_reserved to
PGC_staticmem, which clearly indicates the page is reserved for static
memory.

Signed-off-by: Penny Zheng 
---
v5 changes:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  6 +++---
 xen/common/page_alloc.c   | 20 ++--
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 424aaf2823..1226700085 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -108,9 +108,9 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap PG_shift(2)
 #define PGC_xen_heap  PG_mask(1, 2)
-  /* Page is reserved */
-#define _PGC_reserved PG_shift(3)
-#define PGC_reserved  PG_mask(1, 3)
+  /* Page is static memory */
+#define _PGC_staticmemPG_shift(3)
+#define PGC_staticmem PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken   PG_shift(7)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 319029140f..44600dd9cd 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,8 +151,8 @@
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
-#ifndef PGC_reserved
-#define PGC_reserved 0
+#ifndef PGC_staticmem
+#define PGC_staticmem 0
 #endif
 
 /*
@@ -2286,7 +2286,7 @@ int assign_pages(
 
 for ( i = 0; i < nr; i++ )
 {
-ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_staticmem)));
 if ( pg[i].count_info & PGC_extra )
 extra_pages++;
 }
@@ -2346,7 +2346,7 @@ int assign_pages(
 page_set_owner(&pg[i], d);
 smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
 pg[i].count_info =
-(pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+(pg[i].count_info & (PGC_extra | PGC_staticmem)) | PGC_allocated | 1;
 
 page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
 }
@@ -2652,8 +2652,8 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 scrub_one_page(pg);
 }
 
-/* In case initializing page of static memory, mark it PGC_reserved. */
-pg[i].count_info |= PGC_reserved;
+/* In case initializing page of static memory, mark it PGC_staticmem. */
+pg[i].count_info |= PGC_staticmem;
 }
 }
 
@@ -2683,7 +2683,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 for ( i = 0; i < nr_mfns; i++ )
 {
 /* The page should be reserved and not yet allocated. */
-if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+if ( pg[i].count_info != (PGC_state_free | PGC_staticmem) )
 {
 printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
@@ -2697,10 +2697,10 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 &tlbflush_timestamp);
 
 /*
- * Preserve flag PGC_reserved and change page state
+ * Preserve flag PGC_staticmem and change page state
  * to PGC_state_inuse.
  */
-pg[i].count_info = PGC_reserved | PGC_state_inuse;
+pg[i].count_info = PGC_staticmem | PGC_state_inuse;
 /* Initialise fields which have other uses for free pages. */
 pg[i].u.inuse.type_info = 0;
 page_set_owner(&pg[i], NULL);
@@ -2722,7 +2722,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
 
  out_err:
 while ( i-- )
-pg[i].count_info = PGC_reserved | PGC_state_free;
+pg[i].count_info = PGC_staticmem | PGC_state_free;
 
 spin_unlock(&heap_lock);
 
-- 
2.25.1




RE: [PATCH v4 1/6] xen: do not free reserved memory into heap

2022-05-17 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Tuesday, May 17, 2022 5:29 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Stefano Stabellini ;
> Wei Liu 
> Subject: Re: [PATCH v4 1/6] xen: do not free reserved memory into heap
> 
> Hi,
> 
> On 17/05/2022 09:21, Penny Zheng wrote:
> > Yes,  I remembered that asynchronous is still on the to-do list for static
> memory.
> >
> > If it doesn't bother too much to you, I would like to ask some help on this
> issue, ;).
> > I only knew basic knowledge on the scrubbing,
> My kwnoledge on the scrubbing code is not much better than yours :).
> 
> > I knew that dirty pages is placed at the
> > end of list heap(node, zone, order) for scrubbing and "first_dirty" is used 
> > to
> track down
> > the dirty pages. IMO, Both two parts are restricted to the heap thingy,  not
> reusable for
> > static memory,
> 
> That's correct.
> 
> > so maybe I need to re-write scrub_free_page for static memory, and also
> > link the need-to-scrub reserved pages to a new global list e.g.  
> > dirty_resv_list
> for aync
> > scrubbing?
> 
> So I can foresee two problems with scrubbing static memory:
>1) Once the page is scrubbed, we need to know which domain it belongs
> so we can link the page again
>2) A page may still wait for scrubbing when the domain allocate
> memory (IOW the reserved list may be empty). So we need to find a page
> belonging to the domain and then scrubbed.
>

Understood, thanks for the to-the-point instruction! ;)
For scrubbing at runtime, un-populating memory will free reserved pages
to the reserved list, then asynchronous scrubbing will move them to a
per-domain list. Later, when scrubbing is finished, we need to move them
back to the reserved list.
And if we fail to acquire a page from the reserved list, we try to get a
page from the per-domain list and scrub it synchronously.

As for the initial scrubbing, since the concept of a domain is not
constructed yet, a global list is better.
Right now, we always allocate static memory from a specified starting
address, so we just need to make sure that page is scrubbed before
allocation.
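
In other words, the proposed (not yet implemented) runtime flow under
discussion would roughly be:

/*
 *   unpopulate:  dirty static page  -> d->resv_page_list
 *   async scrub: resv_page_list     -> per-domain "being scrubbed" list
 *   scrub done:  scrubbed page      -> back to d->resv_page_list
 *   populate:    take from resv_page_list; if it is empty, take a page from
 *                the per-domain scrub list and scrub it synchronously
 *
 * Boot-time (initial) scrubbing would use a global list instead, since no
 * domain exists yet at that point.
 */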

> The two problems above would indicate that a per-domain scrub list would
> be the best here. We would need to deal with initial scrubbing
> differently (maybe a global list as you suggested).
> 
> I expect it will take some times to implement it properly. While writing
> this, I was wondering if there is actually any point to scrub pages when
> the domain is releasing them. Even if they are free they are still
> belonging to the domain, so scrubbing them is technically not necessary.
> 

True, true: when static memory is used as guest memory, even if the pages are
free, they still belong to the domain. Even as static shared memory, it is
pre-configured at boot time and cannot be used for any other purpose.
Hmmm, may I ask: if we reboot the domain without scrubbing the pages first,
could the old stale contents affect the rebooted machine?
Or should it be the guest's responsibility to do the cleaning up before using
them?

If it is the guest's responsibility, then yes, maybe scrubbing them is
technically not necessary; flushing the TLB and cleaning the cache is enough~
So should I remove the scrubbing totally for static memory?

> Any thoughts?
> 
> >>>{
> >>>mfn_t mfn = page_to_mfn(pg);
> >>>unsigned long i;
> >>> @@ -2653,7 +2657,8 @@ void __init free_staticmem_pages(struct
> page_info
> >> *pg, unsigned long nr_mfns,
> >>>}
> >>>
> >>>/* In case initializing page of static memory, mark it 
> >>> PGC_reserved. */
> >>> -pg[i].count_info |= PGC_reserved;
> >>> +if ( !(pg[i].count_info & PGC_reserved) )
> >>
> >> NIT: I understand the flag may have already been set, but I am not
> convinced if
> >> it is worth checking it and then set.
> >>
> >
> > Jan suggested that since we remove the __init from free_staticmem_pages,
> it's now in preemptable
> > state at runtime, so better be adding this check here.
> 
> Well, count_info is already modified within that loop (see
> mark_page_free()). So I think the impact of setting PGC_reserved is
> going to be meaningless.
> 
> However... mark_page_free() is going to set count_info to PGC_state_free
> and by consequence clear PGC_reserved. Theferore, in the current
> implementation we always need to re-set PGC_reserved.
> 
> So effectively, the "if" is pointless here.
> 
> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v4 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-05-17 Thread Penny Zheng
Hi Jan 

> -Original Message-
> From: Jan Beulich 
> Sent: Wednesday, May 18, 2022 12:01 AM
> To: Penny Zheng 
> Cc: Wei Chen ; Stefano Stabellini
> ; Julien Grall ; Bertrand Marquis
> ; Volodymyr Babchuk
> ; Andrew Cooper
> ; George Dunlap ;
> Wei Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v4 2/8] xen/arm: allocate static shared memory to the
> default owner dom_io
> 
> On 17.05.2022 11:05, Penny Zheng wrote:
> > --- a/xen/common/domain.c
> > +++ b/xen/common/domain.c
> > @@ -780,6 +780,11 @@ void __init setup_system_domains(void)
> >   * This domain owns I/O pages that are within the range of the 
> > page_info
> >   * array. Mappings occur at the priv of the caller.
> >   * Quarantined PCI devices will be associated with this domain.
> > + *
> > + * DOMID_IO could also be used for mapping memory when no explicit
> > + * domain is specified.
> > + * For instance, DOMID_IO is the owner of memory pre-shared among
> > + * multiple domains at boot time, when no explicit owner is specified.
> >   */
> >  dom_io = domain_create(DOMID_IO, NULL, 0);
> >  if ( IS_ERR(dom_io) )
> 
> I'm sorry: The comment change is definitely better now than it was, but it is
> still written in a way requiring further knowledge to understand what it talks
> about. Without further context, "when no explicit domain is specified" only
> raises questions. I would have tried to make a suggestion, but I can't really
> figure what it is that you want to get across here.

How about I only retain the "For instance, xxx" part and make it more detailed:
"
DOMID_IO is also the default owner of memory pre-shared among multiple domains
at boot time, when no explicit owner is specified with the "owner" property in
a static shared memory device node. See the "Static Shared Memory" section in
docs/misc/arm/device-tree/booting.txt for more details.
"

> 
> Jan



RE: [PATCH v4 1/6] xen: do not free reserved memory into heap

2022-05-17 Thread Penny Zheng
Hi Jan and Julien

> -Original Message-
> From: Jan Beulich 
> Sent: Wednesday, May 18, 2022 12:11 AM
> To: Penny Zheng 
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Julien Grall ; Stefano Stabellini ; 
> Wei
> Liu ; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v4 1/6] xen: do not free reserved memory into heap
> 
> On 10.05.2022 04:27, Penny Zheng wrote:
> > @@ -2762,6 +2767,12 @@ int __init acquire_domstatic_pages(struct
> > domain *d, mfn_t smfn,
> >
> >  return 0;
> >  }
> > +#else
> > +void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> > +  bool need_scrub) {
> > +ASSERT_UNREACHABLE();
> > +}
> >  #endif
> 
> As before I do not agree that we need this (or similar) stub functions. As
> already suggested I think that instead Arm wants to #define PGC_reserved (to
> non-zero) only when !CONFIG_STATIC_MEMORY, just like is already the case
> on x86.
> 

Ok, if you do not like the stub function, then what about putting the
#ifdef-ary back at the common call site where free_staticmem_pages is used:

#ifdef CONFIG_STATIC_MEMORY
    if ( pg->count_info & PGC_reserved )
        /* Reserved pages shall not go back to the heap. */
        return free_staticmem_pages(pg, 1UL << order, need_scrub);
#endif

If this is not an option here either, then before I make the change to guard
PGC_reserved with CONFIG_STATIC_MEMORY on Arm, I'd like to cc Julien here,
since in the very beginning, when we introduced the PGC_reserved flag, he
might have had concerns about limiting the usage of PGC_reserved to static
memory. If he is okay now, I'm fine too. ;)

> Jan



[PATCH v4 8/8] xen/arm: enable statically shared memory on Dom0

2022-05-17 Thread Penny Zheng
From: Penny Zheng 

To add statically shared memory nodes to Dom0, the user shall put the
corresponding static shared memory configuration under the /chosen node.

This commit calls the shm-processing function process_shm from construct_dom0
to enable statically shared memory on Dom0.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 change:
- no change
---
v3 change:
- no change
---
v2 change:
- no change
---
 xen/arch/arm/domain_build.c | 14 ++
 1 file changed, 14 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index ba044cab60..bbf5461595 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -2614,6 +2614,11 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo,
 if ( res )
 return res;
 }
+
+res = make_resv_memory_node(d, kinfo->fdt, addrcells, sizecells,
+&kinfo->shm_mem);
+if ( res )
+return res;
 }
 
 res = fdt_end_node(kinfo->fdt);
@@ -3637,6 +3642,9 @@ static int __init construct_dom0(struct domain *d)
 {
 struct kernel_info kinfo = {};
 int rc;
+#ifdef CONFIG_STATIC_SHM
+const struct dt_device_node *chosen = dt_find_node_by_path("/chosen");
+#endif
 
 /* Sanity! */
 BUG_ON(d->domain_id != 0);
@@ -3671,6 +3679,12 @@ static int __init construct_dom0(struct domain *d)
 allocate_memory_11(d, &kinfo);
 find_gnttab_region(d, &kinfo);
 
+#ifdef CONFIG_STATIC_SHM
+rc = process_shm(d, &kinfo, chosen);
+if ( rc < 0 )
+return rc;
+#endif
+
 /* Map extra GIC MMIO, irqs and other hw stuffs to dom0. */
 rc = gic_map_hwdom_extra_mappings(d);
 if ( rc < 0 )
-- 
2.25.1




[PATCH v4 6/8] xen/arm: set up shared memory foreign mapping for borrower domain

2022-05-17 Thread Penny Zheng
This commit sets up the shared memory foreign mapping for the borrower domain.

If the owner domain is the default dom_io, all sharing domains are treated as
borrower domains.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 changes:
- no change
---
v3 change:
- use map_regions_p2mt instead
---
v2 change:
- remove guest_physmap_add_shm, since for borrower domain, we only
do P2M foreign memory mapping now.
---
 xen/arch/arm/domain_build.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 3a20247836..fcdd87468d 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -957,6 +957,15 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+if ( owner_dom_io || (strcmp(role_str, "borrower") == 0) )
+{
+/* Set up P2M foreign mapping for borrower domain. */
+ret = map_regions_p2mt(d, _gfn(PFN_UP(gbase)), PFN_DOWN(psize),
+   _mfn(PFN_UP(pbase)), p2m_map_foreign_rw);
+if ( ret )
+return ret;
+}
 }
 
 return 0;
-- 
2.25.1




[PATCH v4 7/8] xen/arm: create shared memory nodes in guest device tree

2022-05-17 Thread Penny Zheng
We expose the shared memory to the domU using the "xen,shared-memory-v1"
reserved-memory binding. See
Documentation/devicetree/bindings/reserved-memory/xen,shared-memory.txt
in Linux for the corresponding device tree binding.

To save the cost of re-parsing the shared memory device tree configuration
when creating shared memory nodes in the guest device tree, this commit adds a
new field "shm_mem" to store the shm info per domain.

For each shared memory region, a range is exposed under
the /reserved-memory node as a child node. Each range sub-node is
named xen-shmem@<address> and has the following properties:
- compatible:
compatible = "xen,shared-memory-v1"
- reg:
the base guest physical address and size of the shared memory region
- xen,id:
a string that identifies the shared memory region.
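
For illustration, a node generated this way might look roughly as follows (the
addresses, size and id are made-up example values, and the exact cell layout
depends on the #address-cells/#size-cells in use):

reserved-memory {
    #address-cells = <0x2>;
    #size-cells = <0x2>;
    ranges;

    xen-shmem@50000000 {
        compatible = "xen,shared-memory-v1";
        reg = <0x0 0x50000000 0x0 0x20000000>;
        xen,id = <0x1>;
    };
};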

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 change:
- no change
---
v3 change:
- move field "shm_mem" to kernel_info
---
v2 change:
- using xzalloc
- shm_id should be uint8_t
- make reg a local variable
- add #address-cells and #size-cells properties
- fix alignment
---
 xen/arch/arm/domain_build.c   | 143 +-
 xen/arch/arm/include/asm/kernel.h |   1 +
 xen/arch/arm/include/asm/setup.h  |   1 +
 3 files changed, 143 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index fcdd87468d..ba044cab60 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -895,7 +895,22 @@ static int __init allocate_shared_memory(struct domain *d,
 return ret;
 }
 
-static int __init process_shm(struct domain *d,
+static int __init append_shm_bank_to_domain(struct kernel_info *kinfo,
+paddr_t start, paddr_t size,
+u32 shm_id)
+{
+if ( (kinfo->shm_mem.nr_banks + 1) > NR_MEM_BANKS )
+return -ENOMEM;
+
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].start = start;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].size = size;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].shm_id = shm_id;
+kinfo->shm_mem.nr_banks++;
+
+return 0;
+}
+
+static int __init process_shm(struct domain *d, struct kernel_info *kinfo,
   const struct dt_device_node *node)
 {
 struct dt_device_node *shm_node;
@@ -966,6 +981,14 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+/*
+ * Record static shared memory region info for later setting
+ * up shm-node in guest device tree.
+ */
+ret = append_shm_bank_to_domain(kinfo, gbase, psize, shm_id);
+if ( ret )
+return ret;
 }
 
 return 0;
@@ -1296,6 +1319,117 @@ static int __init make_memory_node(const struct domain *d,
 return res;
 }
 
+#ifdef CONFIG_STATIC_SHM
+static int __init make_shm_memory_node(const struct domain *d,
+   void *fdt,
+   int addrcells, int sizecells,
+   struct meminfo *mem)
+{
+unsigned long i = 0;
+int res = 0;
+
+if ( mem->nr_banks == 0 )
+return -ENOENT;
+
+/*
+ * For each shared memory region, a range is exposed under
+ * the /reserved-memory node as a child node. Each range sub-node is
+ * named xen-shmem@.
+ */
+dt_dprintk("Create xen-shmem node\n");
+
+for ( ; i < mem->nr_banks; i++ )
+{
+uint64_t start = mem->bank[i].start;
+uint64_t size = mem->bank[i].size;
+uint8_t shm_id = mem->bank[i].shm_id;
+/* Placeholder for xen-shmem@ + a 64-bit number + \0 */
+char buf[27];
+const char compat[] = "xen,shared-memory-v1";
+__be32 reg[4];
+__be32 *cells;
+unsigned int len = (addrcells + sizecells) * sizeof(__be32);
+
+snprintf(buf, sizeof(buf), "xen-shmem@%"PRIx64, mem->bank[i].start);
+res = fdt_begin_node(fdt, buf);
+if ( res )
+return res;
+
+res = fdt_property(fdt, "compatible", compat, sizeof(compat));
+if ( res )
+return res;
+
+cells = reg;
+dt_child_set_range(&cells, addrcells, sizecells, start, size);
+
+res = fdt_property(fdt, "reg", reg, len);
+if ( res )
+return res;
+
+dt_dprintk("Shared memory bank %lu: %#"PRIx64"->%#"PRIx64"\n",
+   i, start, start + size);
+
+res = fdt_property_cell(fdt, "xen,id", shm_id);
+if ( res )
+return res;
+
+res = fdt_end_node(fdt);
+if ( res )
+return res;
+}
+
+return res;
+}
+#else
+static int __init make_shm_memory_node(const struct domain

[PATCH v4 5/8] xen/arm: Add additional reference to owner domain when the owner is allocated

2022-05-17 Thread Penny Zheng
A borrower domain will fail to get a page ref using the owner domain
during allocation, when the owner is created after the borrower.

So here, we decide to take and add the right number of references, which
is the number of borrowers, when the owner's memory is allocated.

Signed-off-by: Penny Zheng 
---
v4 changes:
- no change
---
v3 change:
- printk rather than dprintk since it is a serious error
---
v2 change:
- new commit
---
 xen/arch/arm/domain_build.c | 62 +
 1 file changed, 62 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 59591e3c6e..3a20247836 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -794,6 +794,34 @@ static mfn_t __init acquire_shared_memory_bank(struct domain *d,
 
 }
 
+static int __init acquire_nr_borrower_domain(struct domain *d,
+ paddr_t pbase, paddr_t psize,
+ unsigned long *nr_borrowers)
+{
+unsigned long bank;
+
+/* Iterate reserved memory to find requested shm bank. */
+for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
+{
+paddr_t bank_start = bootinfo.reserved_mem.bank[bank].start;
+paddr_t bank_size = bootinfo.reserved_mem.bank[bank].size;
+
+if ( pbase == bank_start && psize == bank_size )
+break;
+}
+
+if ( bank == bootinfo.reserved_mem.nr_banks )
+return -ENOENT;
+
+if ( d == dom_io )
+*nr_borrowers = bootinfo.reserved_mem.bank[bank].nr_shm_domain;
+else
+/* Exclude the owner domain itself. */
+*nr_borrowers = bootinfo.reserved_mem.bank[bank].nr_shm_domain - 1;
+
+return 0;
+}
+
 /*
  * Func allocate_shared_memory is supposed to be only called
  * from the owner.
@@ -805,6 +833,8 @@ static int __init allocate_shared_memory(struct domain *d,
 {
 mfn_t smfn;
 int ret = 0;
+unsigned long nr_pages, nr_borrowers, i;
+struct page_info *page;
 
 dprintk(XENLOG_INFO,
 "Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
@@ -819,6 +849,7 @@ static int __init allocate_shared_memory(struct domain *d,
  * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
  * It sees RAM 1:1 and we do not need to create P2M mapping for it
  */
+nr_pages = PFN_DOWN(psize);
 if ( d != dom_io )
 {
ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn, PFN_DOWN(psize));
@@ -830,6 +861,37 @@ static int __init allocate_shared_memory(struct domain *d,
 }
 }
 
+/*
+ * Get the right amount of references per page, which is the number of
+ * borrow domains.
+ */
+ret = acquire_nr_borrower_domain(d, pbase, psize, &nr_borrowers);
+if ( ret )
+return ret;
+
+/*
+ * Instead of let borrower domain get a page ref, we add as many
+ * additional reference as the number of borrowers when the owner
+ * is allocated, since there is a chance that owner is created
+ * after borrower.
+ */
+page = mfn_to_page(smfn);
+for ( i = 0; i < nr_pages; i++ )
+{
+if ( !get_page_nr(page + i, d, nr_borrowers) )
+{
+printk(XENLOG_ERR
+   "Failed to add %lu references to page %"PRI_mfn".\n",
+   nr_borrowers, mfn_x(smfn) + i);
+goto fail;
+}
+}
+
+return 0;
+
+ fail:
+while ( --i >= 0 )
+put_page_nr(page + i, nr_borrowers);
 return ret;
 }
 
-- 
2.25.1




[PATCH v4 4/8] xen/arm: introduce put_page_nr and get_page_nr

2022-05-17 Thread Penny Zheng
Later, we need to add the right number of references, which should be
the number of borrower domains, to the owner domain. Since we only have
get_page() to increment the page reference count by 1, a per-page loop
would be needed, which is inefficient and time-consuming.

To avoid that loop, this commit introduces a pair of new helpers,
get_page_nr() and put_page_nr(), to increment/drop the page reference
count by nr.

Signed-off-by: Penny Zheng 
---
v4 changes:
- make sure that the right equation return is at least equal to n
- simplify the underflow
---
v3 changes:
- check overflow with "n"
- remove spurious change
- bring back the check that we enter the loop only when count_info is
greater than 0
---
v2 change:
- new commit
---
 xen/arch/arm/include/asm/mm.h |  4 
 xen/arch/arm/mm.c | 42 +++
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 424aaf2823..c737d51e4d 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -347,6 +347,10 @@ void free_init_memory(void);
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
   unsigned int order);
 
+extern bool get_page_nr(struct page_info *page, const struct domain *domain,
+unsigned long nr);
+extern void put_page_nr(struct page_info *page, unsigned long nr);
+
 extern void put_page_type(struct page_info *page);
 static inline void put_page_and_type(struct page_info *page)
 {
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 7b1f2f4906..8c8a8f6378 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1537,21 +1537,29 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 return 0;
 }
 
-struct domain *page_get_owner_and_reference(struct page_info *page)
+static struct domain *page_get_owner_and_nr_reference(struct page_info *page,
+  unsigned long nr)
 {
 unsigned long x, y = page->count_info;
 struct domain *owner;
 
+/* Restrict nr to avoid "double" overflow */
+if ( nr >= PGC_count_mask )
+{
+ASSERT_UNREACHABLE();
+return NULL;
+}
+
 do {
 x = y;
 /*
  * Count ==  0: Page is not allocated, so we cannot take a reference.
  * Count == -1: Reference count would wrap, which is invalid.
  */
-if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
+if ( unlikely(((x + nr) & PGC_count_mask) <= nr) )
 return NULL;
 }
-while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
+while ( (y = cmpxchg(&page->count_info, x, x + nr)) != x );
 
 owner = page_get_owner(page);
 ASSERT(owner);
@@ -1559,14 +1567,19 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
 return owner;
 }
 
-void put_page(struct page_info *page)
+struct domain *page_get_owner_and_reference(struct page_info *page)
+{
+return page_get_owner_and_nr_reference(page, 1);
+}
+
+void put_page_nr(struct page_info *page, unsigned long nr)
 {
 unsigned long nx, x, y = page->count_info;
 
 do {
-ASSERT((y & PGC_count_mask) != 0);
+ASSERT((y & PGC_count_mask) >= nr);
 x  = y;
-nx = x - 1;
+nx = x - nr;
 }
 while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
@@ -1576,19 +1589,30 @@ void put_page(struct page_info *page)
 }
 }
 
-bool get_page(struct page_info *page, const struct domain *domain)
+void put_page(struct page_info *page)
+{
+put_page_nr(page, 1);
+}
+
+bool get_page_nr(struct page_info *page, const struct domain *domain,
+ unsigned long nr)
 {
-const struct domain *owner = page_get_owner_and_reference(page);
+const struct domain *owner = page_get_owner_and_nr_reference(page, nr);
 
 if ( likely(owner == domain) )
 return true;
 
 if ( owner != NULL )
-put_page(page);
+put_page_nr(page, nr);
 
 return false;
 }
 
+bool get_page(struct page_info *page, const struct domain *domain)
+{
+return get_page_nr(page, domain, 1);
+}
+
 /* Common code requires get_page_type and put_page_type.
  * We don't care about typecounts so we just do the minimum to make it
  * happy. */
-- 
2.25.1




[PATCH v4 3/8] xen/arm: allocate static shared memory to a specific owner domain

2022-05-17 Thread Penny Zheng
If the owner property is defined, then the owner domain of a static shared
memory region is no longer the default dom_io, but a specific domain.

This commit implements allocating static shared memory to a specific domain
when owner property is defined.

The coding flow for dealing with the borrower domain will be introduced in
the following commits.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 change:
- no changes
---
v3 change:
- simplify the code since o_gbase is not used if the domain is dom_io
---
v2 change:
- P2M mapping is restricted to normal domain
- in-code comment fix
---
 xen/arch/arm/domain_build.c | 44 +++--
 1 file changed, 33 insertions(+), 11 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 1746c15b7c..59591e3c6e 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -800,9 +800,11 @@ static mfn_t __init acquire_shared_memory_bank(struct domain *d,
  */
 static int __init allocate_shared_memory(struct domain *d,
  u32 addr_cells, u32 size_cells,
- paddr_t pbase, paddr_t psize)
+ paddr_t pbase, paddr_t psize,
+ paddr_t gbase)
 {
 mfn_t smfn;
+int ret = 0;
 
 dprintk(XENLOG_INFO,
 "Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
@@ -817,8 +819,18 @@ static int __init allocate_shared_memory(struct domain *d,
  * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
  * It sees RAM 1:1 and we do not need to create P2M mapping for it
  */
-ASSERT(d == dom_io);
-return 0;
+if ( d != dom_io )
+{
+ret = guest_physmap_add_pages(d, gaddr_to_gfn(gbase), smfn, PFN_DOWN(psize));
+if ( ret )
+{
+printk(XENLOG_ERR
+   "Failed to map shared memory to %pd.\n", d);
+return ret;
+}
+}
+
+return ret;
 }
 
 static int __init process_shm(struct domain *d,
@@ -831,6 +843,8 @@ static int __init process_shm(struct domain *d,
 u32 shm_id;
 u32 addr_cells, size_cells;
 paddr_t gbase, pbase, psize;
+const char *role_str;
+bool owner_dom_io = true;
 
 dt_for_each_child_node(node, shm_node)
 {
@@ -857,19 +871,27 @@ static int __init process_shm(struct domain *d,
 ASSERT(IS_ALIGNED(pbase, PAGE_SIZE) && IS_ALIGNED(psize, PAGE_SIZE));
 gbase = dt_read_number(cells, addr_cells);
 
-/* TODO: Consider owner domain is not the default dom_io. */
+/*
+ * "role" property is optional and if it is defined explicitly,
+ * then the owner domain is not the default "dom_io" domain.
+ */
+if ( dt_property_read_string(shm_node, "role", &role_str) == 0 )
+owner_dom_io = false;
+
 /*
  * Per static shared memory region could be shared between multiple
  * domains.
- * In case re-allocating the same shared memory region, we check
- * if it is already allocated to the default owner dom_io before
- * the actual allocation.
+ * So when owner domain is the default dom_io, in case re-allocating
+ * the same shared memory region, we check if it is already allocated
+ * to the default owner dom_io before the actual allocation.
  */
-if ( !is_shm_allocated_to_domio(pbase) )
+if ( (owner_dom_io && !is_shm_allocated_to_domio(pbase)) ||
+ (!owner_dom_io && strcmp(role_str, "owner") == 0) )
 {
-/* Allocate statically shared pages to the default owner dom_io. */
-ret = allocate_shared_memory(dom_io, addr_cells, size_cells,
- pbase, psize);
+/* Allocate statically shared pages to the owner domain. */
+ret = allocate_shared_memory(owner_dom_io ? dom_io : d,
+ addr_cells, size_cells,
+ pbase, psize, gbase);
 if ( ret )
 return ret;
 }
-- 
2.25.1




[PATCH v4 2/8] xen/arm: allocate static shared memory to the default owner dom_io

2022-05-17 Thread Penny Zheng
From: Penny Zheng 

This commit introduces process_shm to cope with static shared memory in
domain construction.

DOMID_IO will be the default owner of memory pre-shared among multiple domains
at boot time, when no explicit owner is specified.

This commit only considers allocating static shared memory to dom_io
when the owner domain is not explicitly defined in the device tree. All the
rest, including the "borrower" code path and the "explicit owner" code path,
will be introduced in the following patches.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 change:
- no changes
---
v3 change:
- refine in-code comment
---
v2 change:
- instead of introducing a new system domain, reuse the existing dom_io
- make dom_io a non-auto-translated domain, then no need to create P2M
for it
- change dom_io definition and make it wider to support static shm here too
- introduce is_shm_allocated_to_domio to check whether static shm is
allocated yet, instead of using shm_mask bitmap
- add in-code comment
---
 xen/arch/arm/domain_build.c | 132 +++-
 xen/common/domain.c |   5 ++
 2 files changed, 136 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index aa41bd..1746c15b7c 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -522,6 +522,10 @@ static bool __init append_static_memory_to_bank(struct domain *d,
 return true;
 }
 
+/*
+ * If cell is NULL, pbase and psize should hold valid values.
+ * Otherwise, cell will be populated together with pbase and psize.
+ */
 static mfn_t __init acquire_static_memory_bank(struct domain *d,
const __be32 **cell,
u32 addr_cells, u32 size_cells,
@@ -530,7 +534,8 @@ static mfn_t __init acquire_static_memory_bank(struct domain *d,
 mfn_t smfn;
 int res;
 
-device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
+if ( cell )
+device_tree_get_reg(cell, addr_cells, size_cells, pbase, psize);
 ASSERT(IS_ALIGNED(*pbase, PAGE_SIZE) && IS_ALIGNED(*psize, PAGE_SIZE));
 if ( PFN_DOWN(*psize) > UINT_MAX )
 {
@@ -754,6 +759,125 @@ static void __init assign_static_memory_11(struct domain *d,
 panic("Failed to assign requested static memory for direct-map domain 
%pd.",
   d);
 }
+
+#ifdef CONFIG_STATIC_SHM
+/*
+ * This function checks whether the static shared memory region is
+ * already allocated to dom_io.
+ */
+static bool __init is_shm_allocated_to_domio(paddr_t pbase)
+{
+struct page_info *page;
+
+page = maddr_to_page(pbase);
+ASSERT(page);
+
+if ( page_get_owner(page) == NULL )
+return false;
+
+ASSERT(page_get_owner(page) == dom_io);
+return true;
+}
+
+static mfn_t __init acquire_shared_memory_bank(struct domain *d,
+   u32 addr_cells, u32 size_cells,
+   paddr_t *pbase, paddr_t *psize)
+{
+/*
+ * Pages of statically shared memory shall be included
+ * in domain_tot_pages().
+ */
+d->max_pages += PFN_DOWN(*psize);
+
+return acquire_static_memory_bank(d, NULL, addr_cells, size_cells,
+  pbase, psize);
+
+}
+
+/*
+ * Func allocate_shared_memory is supposed to be only called
+ * from the owner.
+ */
+static int __init allocate_shared_memory(struct domain *d,
+ u32 addr_cells, u32 size_cells,
+ paddr_t pbase, paddr_t psize)
+{
+mfn_t smfn;
+
+dprintk(XENLOG_INFO,
+"Allocate static shared memory BANK %#"PRIpaddr"-%#"PRIpaddr".\n",
+pbase, pbase + psize);
+
+smfn = acquire_shared_memory_bank(d, addr_cells, size_cells, &pbase,
+  &psize);
+if ( mfn_eq(smfn, INVALID_MFN) )
+return -EINVAL;
+
+/*
+ * DOMID_IO is the domain, like DOMID_XEN, that is not auto-translated.
+ * It sees RAM 1:1 and we do not need to create P2M mapping for it
+ */
+ASSERT(d == dom_io);
+return 0;
+}
+
+static int __init process_shm(struct domain *d,
+  const struct dt_device_node *node)
+{
+struct dt_device_node *shm_node;
+int ret = 0;
+const struct dt_property *prop;
+const __be32 *cells;
+u32 shm_id;
+u32 addr_cells, size_cells;
+paddr_t gbase, pbase, psize;
+
+dt_for_each_child_node(node, shm_node)
+{
+if ( !dt_device_is_compatible(shm_node, "xen,domain-shared-memory-v1") )
+continue;
+
+if ( !dt_property_read_u32(shm_node, "xen,shm-id", &shm_id) )
+{
+printk("Shared memory node does not provide \"xen,shm-id\" 
property.\n");
+ 

[PATCH v4 0/8] static shared memory on dom0less system

2022-05-17 Thread Penny Zheng
In safety-critical environment, it is not considered safe to
dynamically change important configurations at runtime. Everything
should be statically defined and statically verified.

In this case, if the system configuration knows a priori that there are
only 2 VMs and they need to communicate over shared memory, it is safer
to pre-configure the shared memory at build time rather than let the VMs
attempt to share memory at runtime. And it is faster too.

Furthermore, on a dom0less system, the legacy ways to build up communication
channels between domains, like grant tables, are normally absent.

So this patch series introduces a set of static shared memory device tree nodes
to allow users to statically set up shared memory on a dom0less system, enabling
domains to do shm-based communication.

The only way to trigger this static shared memory configuration should
be via device tree, which is at the same level as the XSM rules.

It was inspired by the ["xl/libxl-based shared memory" patch series](
https://marc.info/?l=xen-devel&m=154404821731186).

Look into the related [design link](
https://lore.kernel.org/all/a50d9fde-1d06-7cda-2779-9eea9e1c0...@xen.org/T/)
for more details.

Penny Zheng (8):
  xen/arm: introduce static shared memory
  xen/arm: allocate static shared memory to the default owner dom_io
  xen/arm: allocate static shared memory to a specific owner domain
  xen/arm: introduce put_page_nr and get_page_nr
  xen/arm: Add additional reference to owner domain when the owner is
allocated
  xen/arm: set up shared memory foreign mapping for borrower domain
  xen/arm: create shared memory nodes in guest device tree
  xen/arm: enable statically shared memory on Dom0

 docs/misc/arm/device-tree/booting.txt | 120 
 xen/arch/arm/Kconfig  |   6 +
 xen/arch/arm/bootfdt.c|  68 +
 xen/arch/arm/domain_build.c   | 378 +-
 xen/arch/arm/include/asm/kernel.h |   1 +
 xen/arch/arm/include/asm/mm.h |   4 +
 xen/arch/arm/include/asm/setup.h  |   4 +
 xen/arch/arm/mm.c |  42 ++-
 xen/common/domain.c   |   5 +
 9 files changed, 618 insertions(+), 10 deletions(-)

-- 
2.25.1




[PATCH v4 1/8] xen/arm: introduce static shared memory

2022-05-17 Thread Penny Zheng
From: Penny Zheng 

This patch series introduces a new feature: setting up static
shared memory on a dom0less system, through device tree configuration.

This commit parses the shared memory node at boot time, and reserves it in
bootinfo.reserved_mem to avoid other uses.

This commit proposes a new Kconfig option CONFIG_STATIC_SHM to wrap
static-shm-related code. This option depends on static memory
(CONFIG_STATIC_MEMORY), because later we want to reuse a few helpers guarded by
CONFIG_STATIC_MEMORY, like acquire_staticmem_pages, for static shared memory.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v4 change:
- nit fix on doc
---
v3 change:
- make nr_shm_domain unsigned int
---
v2 change:
- document refinement
- remove bitmap and use the iteration to check
- add a new field nr_shm_domain to keep the number of shared domain
---
 docs/misc/arm/device-tree/booting.txt | 120 ++
 xen/arch/arm/Kconfig  |   6 ++
 xen/arch/arm/bootfdt.c|  68 +++
 xen/arch/arm/include/asm/setup.h  |   3 +
 4 files changed, 197 insertions(+)

diff --git a/docs/misc/arm/device-tree/booting.txt b/docs/misc/arm/device-tree/booting.txt
index 7b4a29a2c2..b2a7f3f590 100644
--- a/docs/misc/arm/device-tree/booting.txt
+++ b/docs/misc/arm/device-tree/booting.txt
@@ -360,3 +360,123 @@ device-tree:
 
 This will reserve a 512MB region starting at the host physical address
 0x3000 to be exclusively used by DomU1.
+
+Static Shared Memory
+
+
+The static shared memory device tree nodes allow users to statically set up
+shared memory on dom0less system, enabling domains to do shm-based
+communication.
+
+- compatible
+
+"xen,domain-shared-memory-v1"
+
+- xen,shm-id
+
+An 8-bit integer that represents the unique identifier of the shared memory
+region. The maximum identifier shall be "xen,shm-id = <0xff>".
+
+- xen,shared-mem
+
+An array takes a physical address, which is the base address of the
+shared memory region in host physical address space, a size, and a guest
+physical address, as the target address of the mapping. The number of cells
+for the host address (and size) is the same as the guest pseudo-physical
+address and they are inherited from the parent node.
+
+- role (Optional)
+
+A string property specifying the ownership of a shared memory region. The
+value must be one of the following: "owner" or "borrower".
+A shared memory region can be explicitly backed by one domain, which is
+called the "owner domain", and all the other domains sharing
+this region are called "borrower domains".
+If not specified, the default value is "borrower" and the owner is
+"dom_io", a system domain.
+
+As an example:
+
+chosen {
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+xen,xen-bootargs = "console=dtuart dtuart=serial0 bootscrub=0";
+
+..
+
+/* this is for Dom0 */
+dom0-shared-mem@1000 {
+compatible = "xen,domain-shared-memory-v1";
+role = "owner";
+xen,shm-id = <0x0>;
+xen,shared-mem = <0x1000 0x1000 0x1000>;
+}
+
+domU1 {
+compatible = "xen,domain";
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+memory = <0 131072>;
+cpus = <2>;
+vpl011;
+
+/*
+ * shared memory region identified as 0x0(xen,shm-id = <0x0>)
+ * is shared between Dom0 and DomU1.
+ */
+domU1-shared-mem@1000 {
+compatible = "xen,domain-shared-memory-v1";
+role = "borrower";
+xen,shm-id = <0x0>;
+xen,shared-mem = <0x1000 0x1000 0x5000>;
+}
+
+/*
+ * shared memory region identified as 0x1(xen,shm-id = <0x1>)
+ * is shared between DomU1 and DomU2.
+ */
+domU1-shared-mem@5000 {
+compatible = "xen,domain-shared-memory-v1";
+xen,shm-id = <0x1>;
+xen,shared-mem = <0x5000 0x2000 0x6000>;
+}
+
+..
+
+};
+
+domU2 {
+compatible = "xen,domain";
+#address-cells = <0x1>;
+#size-cells = <0x1>;
+memory = <0 65536>;
+cpus = <1>;
+
+/*
+ * shared memory region identified as 0x1(xen,shm-id = <0x1>)
+ * is shared between domU1 and domU2.
+ */
+domU2-shared-mem@5000 {
+compatible = "xen,domain-shared-memory-v1";
+xen,shm-id = <0x1>;
+xen,shared-mem = <0x5000 0x2000 0x7000>;
+}
+
+..
+};
+};

RE: [PATCH v4 1/6] xen: do not free reserved memory into heap

2022-05-17 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Tuesday, May 17, 2022 2:01 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Stefano Stabellini ;
> Wei Liu 
> Subject: Re: [PATCH v4 1/6] xen: do not free reserved memory into heap
> 
> Hi Penny,
> 
> On 10/05/2022 03:27, Penny Zheng wrote:
> > Pages used as guest RAM for static domain, shall be reserved to this
> > domain only.
> > So in case reserved pages being used for other purpose, users shall
> > not free them back to heap, even when last ref gets dropped.
> >
> > free_staticmem_pages will be called by free_heap_pages in runtime for
> > static domain freeing memory resource, so let's drop the __init flag.
> >
> > Signed-off-by: Penny Zheng 
> > ---
> > v4 changes:
> > - no changes
> > ---
> > v3 changes:
> > - fix possible racy issue in free_staticmem_pages()
> > - introduce a stub free_staticmem_pages() for the
> > !CONFIG_STATIC_MEMORY case
> > - move the change to free_heap_pages() to cover other potential call
> > sites
> > - fix the indentation
> > ---
> > v2 changes:
> > - new commit
> > ---
> >   xen/common/page_alloc.c | 17 ++---
> >   xen/include/xen/mm.h|  2 +-
> >   2 files changed, 15 insertions(+), 4 deletions(-)
> >
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > 319029140f..5e569a48a2 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -1443,6 +1443,10 @@ static void free_heap_pages(
> >
> >   ASSERT(order <= MAX_ORDER);
> >
> > +if ( pg->count_info & PGC_reserved )
> 
> NIT: I would suggest to use "unlikely()" here.
> 
> > +/* Reserved page shall not go back to the heap. */
> > +return free_staticmem_pages(pg, 1UL << order, need_scrub);
> > +
> >   spin_lock(&heap_lock);
> >
> >   for ( i = 0; i < (1 << order); i++ ) @@ -2636,8 +2640,8 @@
> > struct domain *get_pg_owner(domid_t domid)
> >
> >   #ifdef CONFIG_STATIC_MEMORY
> >   /* Equivalent of free_heap_pages to free nr_mfns pages of static
> > memory. */ -void __init free_staticmem_pages(struct page_info *pg,
> unsigned long nr_mfns,
> > - bool need_scrub)
> > +void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> > +  bool need_scrub)
> 
> Looking at the implementation of free_staticmem_pages(), the page will be
> scrubbed synchronously.
> 
> If I am not mistaken, static memory is not yet supported so I would be OK to
> continue with synchronous scrubbing. However, this will need to be
> asynchronous before we even consider to security support it.
> 

Yes, I remember that asynchronous scrubbing is still on the to-do list for
static memory.

If it doesn't bother you too much, I would like to ask for some help on this
issue ;).
I only have basic knowledge of the scrubbing: I know that dirty pages are
placed at the end of the heap(node, zone, order) list for scrubbing, and
"first_dirty" is used to track down the dirty pages. IMO, both parts are
restricted to the heap itself and not reusable for static memory, so maybe I
need to re-write scrub_free_page for static memory, and also link the
need-to-scrub reserved pages to a new global list, e.g. a hypothetical
dirty_resv_list, for async scrubbing (rough sketch below)?
 Any suggestions?

> BTW, SUPPORT.md doesn't seem to explicitely say whether static memory is
> supported. Would you be able to send a patch to update it? I think this should
> be tech preview for now.
> 

Sure, will do.

> >   {
> >   mfn_t mfn = page_to_mfn(pg);
> >   unsigned long i;
> > @@ -2653,7 +2657,8 @@ void __init free_staticmem_pages(struct page_info
> *pg, unsigned long nr_mfns,
> >   }
> >
> >   /* In case initializing page of static memory, mark it 
> > PGC_reserved. */
> > -pg[i].count_info |= PGC_reserved;
> > +if ( !(pg[i].count_info & PGC_reserved) )
> 
> NIT: I understand the flag may have already been set, but I am not convinced 
> if
> it is worth checking it and then set.
> 

Jan suggested that since we removed __init from free_staticmem_pages, it is
now in a preemptible state at runtime, so it is better to add this check here.

> > +pg[i].count_info |= PGC_reserved;
> 
> 
> >   }
> >   }
> >
> > @@ -2762,6 +2767,12 @@ int __init acquire_domstatic_pages(struct
> > domain *d, mfn_t smfn

RE: [PATCH v4 6/6] xen: retrieve reserved pages on populate_physmap

2022-05-16 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Tuesday, May 17, 2022 2:29 AM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Andrew Cooper
> ; George Dunlap ;
> Jan Beulich ; Stefano Stabellini ;
> Wei Liu 
> Subject: Re: [PATCH v4 6/6] xen: retrieve reserved pages on populate_physmap
> 
> Hi Penny,
> 
> On 10/05/2022 03:27, Penny Zheng wrote:
> > When static domain populates memory through populate_physmap on
> > runtime,
> 
> Typo: s/when static/when a static/ or "when static domains populate"
> 
> s/on runtime/at runtime/
> 

Sure, 

> > other than allocating from heap, it shall retrieve reserved pages from
> 
> I am not sure to understand the part before the comma. But it doens't sound
> necessary so maybe drop it?
>  

Sure,

> > resv_page_list to make sure that guest RAM is still restricted in
> > statically configured memory regions. And this commit introduces a new
> > helper acquire_reserved_page to make it work.
> >
> > Signed-off-by: Penny Zheng 
> 
> [...]
> 
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > 290526adaf..06e7037a28 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -2740,8 +2740,8 @@ static struct page_info * __init
> acquire_staticmem_pages(mfn_t smfn,
> >* Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> >* then assign them to one specific domain #d.
> >*/
> > -int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> > -   unsigned int nr_mfns, unsigned int 
> > memflags)
> > +int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int
> nr_mfns,
> > +unsigned int memflags)
> >   {
> >   struct page_info *pg;
> >
> > @@ -2769,12 +2769,43 @@ int __init acquire_domstatic_pages(struct
> > domain *d, mfn_t smfn,
> >
> >   return 0;
> >   }
> > +
> > +/*
> > + * Acquire a page from reserved page list(resv_page_list), when
> > +populating
> > + * memory for static domain on runtime.
> > + */
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +struct page_info *page;
> > +mfn_t smfn;
> > +
> > +/* Acquire a page from reserved page list(resv_page_list). */
> > +page = page_list_remove_head(&d->resv_page_list);
> Alloc/free of memory can happen concurrently. So access to rsv_page_list
> needs to be protected with a spinlock (mostly like d->page_alloc_lock).
>

Oh, understood, will fix.
 
> > +if ( unlikely(!page) )
> > +return INVALID_MFN;
> > +
> > +smfn = page_to_mfn(page);
> > +
> > +if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
> 
> I am OK if we call acquire_domstatic_pages() for now. But long term, I think 
> we
> should consider to optimize it because we know the page is valid and belong
> to the guest. So there are a lot of pointless work (checking mfn_valid(),
> scrubbing in the free part, cleaning the cache...).
> 

I'm willing to fix it here, since this fix is not blocking any other patch
series~~
I'm considering that maybe we could add a new memflag MEMF_xxx (oh, naming
things is really "killing" me); then all this pointless work (checking
mfn_valid, flushing the TLB and cache) could be skipped by checking
memflags & MEMF_xxx, roughly as sketched below.
Wdyt?

> > +{
> > +page_list_add_tail(page, &d->resv_page_list);
> > +return INVALID_MFN;
> > +}
> > +
> > +return smfn;
> > +}
> >   #else
> >   void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> > bool need_scrub)
> >   {
> >   ASSERT_UNREACHABLE();
> >   }
> > +
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +ASSERT_UNREACHABLE();
> > +return INVALID_MFN;
> > +}
> >   #endif
> >
> >   /*
> > diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index
> > 35dc7143a4..c613afa57e 100644
> > --- a/xen/include/xen/domain.h
> > +++ b/xen/include/xen/domain.h
> > @@ -38,6 +38,10 @@ void arch_get_domain_info(const struct domain *d,
> >   #define CDF_staticmem(1U << 2)
> >   #endif
> >
> > +#ifndef is_domain_using_staticmem
> > +#define is_domain_using_staticmem(d) ((void)(d), false) #endif
> > +
> >   /*
> >* Arch-specifics.
> >*/
> > diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index
> > 9fd95deaec..74810e1f54 100644
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -92,6 +92,7 @@ void free_staticmem_pages(struct page_info *pg,
> unsigned long nr_mfns,
> >   int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int
> nr_mfns,
> >   unsigned int memflags);
> >   #endif
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
> >
> >   /* Map machine page range in Xen virtual address space. */
> >   int map_pages_to_xen(
> 
> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v3 5/8] xen/arm: Add additional reference to owner domain when the owner is allocated

2022-05-12 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Thursday, May 12, 2022 6:54 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v3 5/8] xen/arm: Add additional reference to owner
> domain when the owner is allocated
> 
> 
> 
> On 12/05/2022 10:11, Penny Zheng wrote:
> > @@ -827,6 +858,37 @@ static int __init allocate_shared_memory(struct
> domain *d,
> >   }
> >   }
> >
> > +/*
> > + * Get the right amount of references per page, which is the number of
> > + * borrow domains.
> > + */
> > +ret = acquire_nr_borrower_domain(d, pbase, psize, &nr_borrowers);
> > +if ( ret )
> > +return ret;
> > +
> > +/*
> > + * Instead of let borrower domain get a page ref, we add as many
> > + * additional reference as the number of borrowers when the owner
> > + * is allocated, since there is a chance that owner is created
> > + * after borrower.
> > + */
> > +page = mfn_to_page(smfn);
> Where do you check that the range [smfn, smfn + nr_pages] is actual RAM?
> If there are none, then you should use mfn_valid() for each mfn to ensure
> there will be a struct page for it.
>

Actually, before this, acquire_shared_memory_bank will eventually call into
acquire_staticmem_pages to acquire [smfn, smfn + nr_pages], and the check is
done there. See xen/common/page_alloc.c:acquire_staticmem_pages:2676
 
> Cheers,
> 
> --
> Julien Grall


RE: [PATCH v3 4/8] xen/arm: introduce put_page_nr and get_page_nr

2022-05-12 Thread Penny Zheng
Hi Julien

> -Original Message-
> From: Julien Grall 
> Sent: Thursday, May 12, 2022 6:14 PM
> To: Penny Zheng ; xen-devel@lists.xenproject.org
> Cc: Wei Chen ; Stefano Stabellini
> ; Bertrand Marquis ;
> Volodymyr Babchuk 
> Subject: Re: [PATCH v3 4/8] xen/arm: introduce put_page_nr and get_page_nr
> 
> 
> 
> On 12/05/2022 10:11, Penny Zheng wrote:
> > Later, we need to add the right amount of references, which should be
> > the number of borrower domains, to the owner domain. Since we only
> > have
> > get_page() to increment the page reference by 1, a loop is needed per
> > page, which is inefficient and time-consuming.
> >
> > To save the loop time, this commit introduces a set of new helpers
> > put_page_nr() and get_page_nr() to increment/drop the page reference by
> nr.
> >
> > Signed-off-by: Penny Zheng 
> > ---
> > v3 changes:
> > - check overflow with "n"
> > - remove spurious change
> > - bring back the check that we enter the loop only when count_info is
> > greater than 0
> > ---
> > v2 change:
> > - new commit
> > ---
> >   xen/arch/arm/include/asm/mm.h |  4 
> >   xen/arch/arm/mm.c | 36 ++-
> >   2 files changed, 31 insertions(+), 9 deletions(-)
> >
> > diff --git a/xen/arch/arm/include/asm/mm.h
> > b/xen/arch/arm/include/asm/mm.h index 424aaf2823..c737d51e4d 100644
> > --- a/xen/arch/arm/include/asm/mm.h
> > +++ b/xen/arch/arm/include/asm/mm.h
> > @@ -347,6 +347,10 @@ void free_init_memory(void);
> >   int guest_physmap_mark_populate_on_demand(struct domain *d,
> unsigned long gfn,
> > unsigned int order);
> >
> > +extern bool get_page_nr(struct page_info *page, const struct domain
> *domain,
> > +unsigned long nr); extern void
> > +put_page_nr(struct page_info *page, unsigned long nr);
> > +
> >   extern void put_page_type(struct page_info *page);
> >   static inline void put_page_and_type(struct page_info *page)
> >   {
> > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index
> > 7b1f2f4906..a9461e07aa 100644
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -1537,7 +1537,8 @@ long arch_memory_op(int op,
> XEN_GUEST_HANDLE_PARAM(void) arg)
> >   return 0;
> >   }
> >
> > -struct domain *page_get_owner_and_reference(struct page_info *page)
> > +static struct domain *page_get_owner_and_nr_reference(struct page_info
> *page,
> > +  unsigned long
> > +nr)
> >   {
> >   unsigned long x, y = page->count_info;
> >   struct domain *owner;
> > @@ -1548,10 +1549,10 @@ struct domain
> *page_get_owner_and_reference(struct page_info *page)
> >* Count ==  0: Page is not allocated, so we cannot take a 
> > reference.
> >* Count == -1: Reference count would wrap, which is invalid.
> >*/
> > -if ( unlikely(((x + 1) & PGC_count_mask) <= 1) )
> > +if ( unlikely(((x + nr) & PGC_count_mask) <= 1) )
> 
> This check looks wrong to me. You want to make sure that the right equation
> return is at least equal to n otherwise.
> 

Right, right, I haven't considered thoroughly! A thousand thanks for the 
following
detailed explanation~

> Furthermore, I think we need to restrict 'nr' to PGC_count_mask to fully catch
> any overflow.
> 
> Before the loop, the code would look like:
> 
> /* Restrict nr to avoid "double" overflow */ if ( nr >= PGC_count_mask ) {
>  ASSERT_UNREACHABLE();
>  return NULL;
> }
> 
> The check in the loop would look like:
> 
> if ( unlikely((x + nr) & PGC_count_mask) <= n )
> 
> That said, it might be easier to read the overflow check if we do:
> 
> count = x & PGC_count_mask;
> if ( !count || ((PGC_count_mask - count) <= n) )
> 
> I haven't measured and check which of the two options would result to better
> code and performance (get_page() is often called).
>

Correct me if I understand it wrongly:
IMO, only option two actually catches any overflow? Keeping the
(PGC_count_mask - count) <= nr check in the loop, rather than before the loop
like option 1, covers the fact that page->count_info can change under us.

> >   return NULL;
> >   }
> > -while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
> > +while ( (y = cmpxchg(&page->count_info, x, x + nr)) != x );
> >
> >   owner = page_get_owner(page);
> >  

[PATCH v3 7/8] xen/arm: create shared memory nodes in guest device tree

2022-05-12 Thread Penny Zheng
We expose the shared memory to the domU using the "xen,shared-memory-v1"
reserved-memory binding. See
Documentation/devicetree/bindings/reserved-memory/xen,shared-memory.txt
in Linux for the corresponding device tree binding.

To save the cost of re-parsing the shared memory device tree configuration when
creating shared memory nodes in the guest device tree, this commit adds a new
field "shm_mem" to store the shm info per domain.

For each shared memory region, a range is exposed under
the /reserved-memory node as a child node. Each range sub-node is
named xen-shmem@ and has the following properties:
- compatible:
compatible = "xen,shared-memory-v1"
- reg:
the base guest physical address and size of the shared memory region
- xen,id:
a string that identifies the shared memory region.

Signed-off-by: Penny Zheng 
---
v3 change:
- move field "shm_mem" to kernel_info
---
v2 change:
- using xzalloc
- shm_id should be uint8_t
- make reg a local variable
- add #address-cells and #size-cells properties
- fix alignment
---
 xen/arch/arm/domain_build.c   | 143 +-
 xen/arch/arm/include/asm/kernel.h |   1 +
 xen/arch/arm/include/asm/setup.h  |   1 +
 3 files changed, 143 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index aa7c264e23..fb9146b6e0 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -892,7 +892,22 @@ static int __init allocate_shared_memory(struct domain *d,
 return ret;
 }
 
-static int __init process_shm(struct domain *d,
+static int __init append_shm_bank_to_domain(struct kernel_info *kinfo,
+paddr_t start, paddr_t size,
+u32 shm_id)
+{
+if ( (kinfo->shm_mem.nr_banks + 1) > NR_MEM_BANKS )
+return -ENOMEM;
+
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].start = start;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].size = size;
+kinfo->shm_mem.bank[kinfo->shm_mem.nr_banks].shm_id = shm_id;
+kinfo->shm_mem.nr_banks++;
+
+return 0;
+}
+
+static int __init process_shm(struct domain *d, struct kernel_info *kinfo,
   const struct dt_device_node *node)
 {
 struct dt_device_node *shm_node;
@@ -963,6 +978,14 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+/*
+ * Record static shared memory region info for later setting
+ * up shm-node in guest device tree.
+ */
+ret = append_shm_bank_to_domain(kinfo, gbase, psize, shm_id);
+if ( ret )
+return ret;
 }
 
 return 0;
@@ -1293,6 +1316,117 @@ static int __init make_memory_node(const struct domain *d,
 return res;
 }
 
+#ifdef CONFIG_STATIC_SHM
+static int __init make_shm_memory_node(const struct domain *d,
+   void *fdt,
+   int addrcells, int sizecells,
+   struct meminfo *mem)
+{
+unsigned long i = 0;
+int res = 0;
+
+if ( mem->nr_banks == 0 )
+return -ENOENT;
+
+/*
+ * For each shared memory region, a range is exposed under
+ * the /reserved-memory node as a child node. Each range sub-node is
+ * named xen-shmem@.
+ */
+dt_dprintk("Create xen-shmem node\n");
+
+for ( ; i < mem->nr_banks; i++ )
+{
+uint64_t start = mem->bank[i].start;
+uint64_t size = mem->bank[i].size;
+uint8_t shm_id = mem->bank[i].shm_id;
+/* Placeholder for xen-shmem@ + a 64-bit number + \0 */
+char buf[27];
+const char compat[] = "xen,shared-memory-v1";
+__be32 reg[4];
+__be32 *cells;
+unsigned int len = (addrcells + sizecells) * sizeof(__be32);
+
+snprintf(buf, sizeof(buf), "xen-shmem@%"PRIx64, mem->bank[i].start);
+res = fdt_begin_node(fdt, buf);
+if ( res )
+return res;
+
+res = fdt_property(fdt, "compatible", compat, sizeof(compat));
+if ( res )
+return res;
+
+cells = reg;
+dt_child_set_range(&cells, addrcells, sizecells, start, size);
+
+res = fdt_property(fdt, "reg", reg, len);
+if ( res )
+return res;
+
+dt_dprintk("Shared memory bank %lu: %#"PRIx64"->%#"PRIx64"\n",
+   i, start, start + size);
+
+res = fdt_property_cell(fdt, "xen,id", shm_id);
+if ( res )
+return res;
+
+res = fdt_end_node(fdt);
+if ( res )
+return res;
+}
+
+return res;
+}
+#else
+static int __init make_shm_memory_node(const struct domain *d,
+   void *fdt,
+  

[PATCH v3 6/8] xen/arm: set up shared memory foreign mapping for borrower domain

2022-05-12 Thread Penny Zheng
This commit sets up shared memory foreign mapping for borrower domain.

If the owner domain is the default dom_io, all sharing domains are treated as
borrower domains.

Signed-off-by: Penny Zheng 
Reviewed-by: Stefano Stabellini 
---
v3 change:
- use map_regions_p2mt instead
---
v2 change:
- remove guest_physmap_add_shm, since for borrower domain, we only
do P2M foreign memory mapping now.
---
 xen/arch/arm/domain_build.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 293e79e4e7..aa7c264e23 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -954,6 +954,15 @@ static int __init process_shm(struct domain *d,
 if ( ret )
 return ret;
 }
+
+if ( owner_dom_io || (strcmp(role_str, "borrower") == 0) )
+{
+/* Set up P2M foreign mapping for borrower domain. */
+ret = map_regions_p2mt(d, _gfn(PFN_UP(gbase)), PFN_DOWN(psize),
+   _mfn(PFN_UP(pbase)), p2m_map_foreign_rw);
+if ( ret )
+return ret;
+}
 }
 
 return 0;
-- 
2.25.1



