Re: [Xen-devel] [PATCH 2/5] allow domain heap allocations to specify more than one NUMA node

2015-03-03 Thread Jan Beulich
 On 02.03.15 at 18:12, ian.campb...@citrix.com wrote:
 On Thu, 2015-02-26 at 13:53 +, Jan Beulich wrote:
 ... using struct domain as a container for passing the respective
 affinity mask: Quite a number of allocations are domain specific, yet
 not to be accounted for that domain. Introduce a flag suppressing the
 accounting altogether (i.e. going beyond MEMF_no_refcount) and use it
 right away in common code (x86 and IOMMU code will get adjusted
 subsequently).
 
 Signed-off-by: Jan Beulich jbeul...@suse.com
 
 Acked-by: Ian Campbell ian.campb...@citrix.com

Thanks.

 Does this patch constitute all the not just (x86) from the initial
 mail? I'll assume so unless I hear otherwise.

No, this part of patch 1

Note that this gives meaning to MEMF_exact_node specified alone (i.e.
 implicitly combined with NUMA_NO_NODE): In such a case any node inside
 the domain's node mask is acceptable, but no other node. This changed
 behavior is (implicitly) being exposed through the memop hypercalls.

does too. (Patches 4 and 5 are only indirectly x86-specific, as the
IOMMU code touched there is used on x86 only. But that's of no
concern to you anyway.)

Jan


___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 2/5] allow domain heap allocations to specify more than one NUMA node

2015-03-02 Thread Ian Campbell
On Thu, 2015-02-26 at 13:53 +, Jan Beulich wrote:
 ... using struct domain as a container for passing the respective
 affinity mask: Quite a number of allocations are domain specific, yet
 not to be accounted for that domain. Introduce a flag suppressing the
 accounting altogether (i.e. going beyond MEMF_no_refcount) and use it
 right away in common code (x86 and IOMMU code will get adjusted
 subsequently).
 
 Signed-off-by: Jan Beulich jbeul...@suse.com

Acked-by: Ian Campbell ian.campb...@citrix.com

Does this patch constitute all the not just (x86) from the initial
mail? I'll assume so unless I hear otherwise.



___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 2/5] allow domain heap allocations to specify more than one NUMA node

2015-02-27 Thread Dario Faggioli
On Thu, 2015-02-26 at 13:53 +, Jan Beulich wrote:
 ... using struct domain as a container for passing the respective
 affinity mask: Quite a number of allocations are domain specific, yet
 not to be accounted for that domain. Introduce a flag suppressing the
 accounting altogether (i.e. going beyond MEMF_no_refcount) and use it
 right away in common code (x86 and IOMMU code will get adjusted
 subsequently).
 
 Signed-off-by: Jan Beulich jbeul...@suse.com
 
Reviewed-by: Dario Faggioli dario.faggi...@citrix.com

Regards,
Dario


signature.asc
Description: This is a digitally signed message part
___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


[Xen-devel] [PATCH 2/5] allow domain heap allocations to specify more than one NUMA node

2015-02-26 Thread Jan Beulich
... using struct domain as a container for passing the respective
affinity mask: Quite a number of allocations are domain specific, yet
not to be accounted for that domain. Introduce a flag suppressing the
accounting altogether (i.e. going beyond MEMF_no_refcount) and use it
right away in common code (x86 and IOMMU code will get adjusted
subsequently).

Signed-off-by: Jan Beulich jbeul...@suse.com

--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1657,7 +1657,8 @@ gnttab_transfer(
 struct page_info *new_page;
 void *sp, *dp;
 
-new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
+new_page = alloc_domheap_page(e, MEMF_no_owner |
+ MEMF_bits(max_bitsize));
 if ( new_page == NULL )
 {
 gop.status = GNTST_address_too_big;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -462,7 +462,8 @@ static long memory_exchange(XEN_GUEST_HA
 /* Allocate a chunk's worth of anonymous output pages. */
 for ( j = 0; j < (1UL << out_chunk_order); j++ )
 {
-page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
+page = alloc_domheap_pages(d, exch.out.extent_order,
+   MEMF_no_owner | memflags);
 if ( unlikely(page == NULL) )
 {
 rc = -ENOMEM;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1685,10 +1685,14 @@ struct page_info *alloc_domheap_pages(
 
 ASSERT(!in_irq());
 
-bits = domain_clamp_alloc_bitsize(d, bits ? : (BITS_PER_LONG+PAGE_SHIFT));
+bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
+  bits ? : (BITS_PER_LONG+PAGE_SHIFT));
 if ( (zone_hi = min_t(unsigned int, bits_to_zone(bits), zone_hi)) == 0 )
 return NULL;
 
+if ( memflags & MEMF_no_owner )
+memflags |= MEMF_no_refcount;
+
 if ( dma_bitsize && ((dma_zone = bits_to_zone(dma_bitsize)) < zone_hi) )
 pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d);
 
@@ -1698,7 +1702,8 @@ struct page_info *alloc_domheap_pages(
   memflags, d)) == NULL)) )
  return NULL;
 
-if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
+if ( d && !(memflags & MEMF_no_owner) &&
+ assign_pages(d, pg, order, memflags) )
 {
 free_heap_pages(pg, order);
 return NULL;
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -120,6 +120,8 @@ struct npfec {
 #define  MEMF_no_dma  (1U<<_MEMF_no_dma)
 #define _MEMF_exact_node  4
 #define  MEMF_exact_node  (1U<<_MEMF_exact_node)
+#define _MEMF_no_owner5
+#define  MEMF_no_owner(1U<<_MEMF_no_owner)
 #define _MEMF_node8
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n) ((((n) + 1) & MEMF_node_mask) << _MEMF_node)



allow domain heap allocations to specify more than one NUMA node

... using struct domain as a container for passing the respective
affinity mask: Quite a number of allocations are domain specific, yet
not to be accounted for that domain. Introduce a flag suppressing the
accounting altogether (i.e. going beyond MEMF_no_refcount) and use it
right away in common code (x86 and IOMMU code will get adjusted
subsequently).

Signed-off-by: Jan Beulich jbeul...@suse.com

--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1657,7 +1657,8 @@ gnttab_transfer(
 struct page_info *new_page;
 void *sp, *dp;
 
-new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
+new_page = alloc_domheap_page(e, MEMF_no_owner |
+ MEMF_bits(max_bitsize));
 if ( new_page == NULL )
 {
 gop.status = GNTST_address_too_big;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -462,7 +462,8 @@ static long memory_exchange(XEN_GUEST_HA
 /* Allocate a chunk's worth of anonymous output pages. */
 for ( j = 0; j < (1UL << out_chunk_order); j++ )
 {
-page = alloc_domheap_pages(NULL, exch.out.extent_order, memflags);
+page = alloc_domheap_pages(d, exch.out.extent_order,
+   MEMF_no_owner | memflags);
 if ( unlikely(page == NULL) )
 {
 rc = -ENOMEM;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1685,10 +1685,14 @@ struct page_info *alloc_domheap_pages(
 
 ASSERT(!in_irq());
 
-bits = domain_clamp_alloc_bitsize(d, bits ? : (BITS_PER_LONG+PAGE_SHIFT));
+bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
+  bits ? : (BITS_PER_LONG+PAGE_SHIFT));
 if ( (zone_hi = min_t(unsigned int, bits_to_zone(bits), zone_hi)) == 0 )
 return NULL;
 
+if ( memflags & MEMF_no_owner )
+