[IA64] remove static-partitioned xenheap

This patch is the ia64 counterpart of 19054:845aa241e163

Signed-off-by: Isaku Yamahata <yamah...@valinux.co.jp>
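
The core of the change is the asm-ia64/mm.h hunk below: whether a page belongs
to the xenheap is no longer derived from the fixed physical range
[xen_pstart, xenheap_phys_end) but from a per-page PGC_xen_heap flag in
count_info, mirroring the x86_64 side (19054).  A minimal standalone sketch of
the new test (illustration only -- PG_shift/PG_mask, BITS_PER_LONG and struct
page_info are simplified stand-ins here, not the real Xen definitions):

    /* Standalone illustration; simplified stand-ins, not the real Xen types. */
    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG   64                        /* ia64 */
    #define PG_shift(idx)   (BITS_PER_LONG - (idx))   /* assumed, mirrors asm-x86/mm.h */
    #define PG_mask(x, idx) ((unsigned long)(x) << PG_shift(idx))

    #define PGC_allocated   PG_mask(1, 1)
    #define PGC_xen_heap    PG_mask(1, 2)             /* new flag added by this patch */
    #define PGC_count_width PG_shift(6)
    #define PGC_count_mask  ((1UL << PGC_count_width) - 1)

    struct page_info { unsigned long count_info; };   /* stand-in for the real struct */

    /* New test: a property of the page itself, not of a fixed physical range. */
    #define is_xen_heap_page(pg) ((pg)->count_info & PGC_xen_heap)

    int main(void)
    {
        struct page_info dom_page = { .count_info = PGC_allocated | 1 };
        struct page_info xen_page = { .count_info = PGC_xen_heap | PGC_allocated | 1 };

        assert(!is_xen_heap_page(&dom_page));
        assert(is_xen_heap_page(&xen_page));

        /* The adjusted BUG_ON()s in mm.c mask the new flag out before comparing: */
        assert((xen_page.count_info & ~PGC_xen_heap) == (PGC_allocated | 1));

        /* A freshly allocated xenheap page carries PGC_xen_heap but no owner or
         * references yet; this is what the relaxed ASSERT() in
         * share_xen_page_with_guest() now admits. */
        struct page_info fresh = { .count_info = PGC_xen_heap };
        assert((fresh.count_info & (PGC_allocated | PGC_count_mask)) == 0);

        printf("flag-based xenheap test behaves as expected\n");
        return 0;
    }

Since the flag travels with the page, is_xen_heap_mfn() also gains a
mfn_valid() guard before dereferencing mfn_to_page().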

diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c
--- a/xen/arch/ia64/linux-xen/mm_contig.c
+++ b/xen/arch/ia64/linux-xen/mm_contig.c
@@ -190,7 +190,7 @@ per_cpu_allocate(void *xen_heap_start, u
 	unsigned long end = start + size;
 
 	if (__pa(end) < end_in_pa) {
-		init_xenheap_pages(__pa(xen_heap_start), __pa(start));
+		init_boot_pages(__pa(xen_heap_start), __pa(start));
 		xen_heap_start = (void*)end;
 		percpu_area = (void*)virt_to_xenva(start);
 		printk("allocate percpu area 0x...@0x%lx 0x%p\n",
diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c
+++ b/xen/arch/ia64/vmx/vmx_init.c
@@ -146,7 +146,7 @@ vmx_init_env(void *start, unsigned long 
 			VM_BUFFER_ALIGN_UP((unsigned long)start);
 		unsigned long e_vm_buffer = s_vm_buffer + buffer_size;
 		if (__pa(e_vm_buffer) < end_in_pa) {
-			init_xenheap_pages(__pa(start), __pa(s_vm_buffer));
+			init_boot_pages(__pa(start), __pa(s_vm_buffer));
 			start = (void*)e_vm_buffer;
 			vm_buffer = virt_to_xenva(s_vm_buffer);
 			printk("vm_buffer: 0x%lx\n", vm_buffer);
diff --git a/xen/arch/ia64/xen/machine_kexec.c b/xen/arch/ia64/xen/machine_kexec.c
--- a/xen/arch/ia64/xen/machine_kexec.c
+++ b/xen/arch/ia64/xen/machine_kexec.c
@@ -159,8 +159,10 @@ static int machine_kexec_get_xen(xen_kex
 static int machine_kexec_get_xenheap(xen_kexec_range_t *range)
 {
 	range->start = (ia64_tpa(_end) + (ELF_PAGE_SIZE - 1)) & ELF_PAGE_MASK;
-	range->size = (unsigned long)xenheap_phys_end -
-		      (unsigned long)range->start;
+	range->size =
+		(((unsigned long)range->start + KERNEL_TR_PAGE_SIZE) &
+		 ~(KERNEL_TR_PAGE_SIZE - 1))
+		- (unsigned long)range->start;
 	return 0;
 }
 
@@ -195,7 +197,6 @@ int machine_kexec_get(xen_kexec_range_t 
 
 void arch_crash_save_vmcoreinfo(void)
 {
-    VMCOREINFO_SYMBOL(xenheap_phys_end);
 	VMCOREINFO_SYMBOL(dom_xen);
 	VMCOREINFO_SYMBOL(dom_io);
 	VMCOREINFO_SYMBOL(xen_pstart);
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -466,7 +466,7 @@ share_xen_page_with_guest(struct page_in
 
     page_set_owner(page, d);
     wmb(); /* install valid domain ptr before updating refcnt. */
-    ASSERT(page->count_info == 0);
+    ASSERT((page->count_info & (PGC_allocated | PGC_count_mask)) == 0);
 
     /* Only add to the allocation list if the domain isn't dying. */
     if ( !d->is_dying )
@@ -987,7 +987,7 @@ assign_domain_page(struct domain *d,
     struct page_info* page = mfn_to_page(physaddr >> PAGE_SHIFT);
 
     BUG_ON((physaddr & _PAGE_PPN_MASK) != physaddr);
-    BUG_ON(page->count_info != (PGC_allocated | 1));
+    BUG_ON((page->count_info & ~PGC_xen_heap) != (PGC_allocated | 1));
     set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
     // because __assign_domain_page() uses set_pte_rel() which has
     // release semantics, smp_mb() isn't needed.
@@ -2894,7 +2894,8 @@ guest_physmap_add_page(struct domain *d,
 
     for (i = 0; i < (1UL << page_order); i++) {
         BUG_ON(!mfn_valid(mfn));
-        BUG_ON(mfn_to_page(mfn)->count_info != (PGC_allocated | 1));
+        BUG_ON((mfn_to_page(mfn)->count_info & ~PGC_xen_heap) !=
+               (PGC_allocated | 1));
         __guest_physmap_add_page(d, gpfn, mfn);
         mfn++;
         gpfn++;
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -33,7 +33,7 @@
 #include <asm/sn/simulator.h>
 #include <asm/sal.h>
 
-unsigned long xenheap_phys_end, total_pages;
+unsigned long total_pages;
 
 char saved_command_line[COMMAND_LINE_SIZE];
 char __initdata dom0_command_line[COMMAND_LINE_SIZE];
@@ -72,25 +72,11 @@ integer_param("xencons", opt_xencons);
 static int __initdata opt_xencons_poll;
 boolean_param("xencons_poll", opt_xencons_poll);
 
+#define XENHEAP_DEFAULT_SIZE    KERNEL_TR_PAGE_SIZE
+#define XENHEAP_SIZE_MIN        (16 * 1024 * 1024)      /* 16MBytes */
 unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
 unsigned long xen_pstart;
 
-static void __init parse_xenheap_megabytes(char *s)
-{
-    unsigned long megabytes = simple_strtoll(s, NULL, 0);
-
-#define XENHEAP_MEGABYTES_MIN   16UL
-    if (megabytes < XENHEAP_MEGABYTES_MIN)
-        megabytes = XENHEAP_MEGABYTES_MIN;
-
-#define XENHEAP_MEGABYTES_MAX   4096UL  /* need more? */
-    if (megabytes > XENHEAP_MEGABYTES_MAX)
-        megabytes = XENHEAP_MEGABYTES_MAX;
-
-    xenheap_size =  megabytes * 1024 * 1024;
-}
-custom_param("xenheap_megabytes", parse_xenheap_megabytes);
-
 static int __init
 xen_count_pages(u64 start, u64 end, void *arg)
 {
@@ -315,7 +301,7 @@ init_xenheap_mds(unsigned long start, un
             unsigned long s = max(start, max(__pa(desc->xen_heap_start),
                                              md->phys_addr));
             unsigned long e = min(end, min(md_end, desc->xenheap_phys_end));
-            init_xenheap_pages(s, e);
+            init_boot_pages(s, e);
         }
     }
 
@@ -362,6 +348,7 @@ void __init start_kernel(void)
     struct domain *idle_domain;
     struct vcpu *dom0_vcpu0;
     efi_memory_desc_t *kern_md, *last_md, *md;
+    unsigned long xenheap_phys_end;
     void *xen_heap_start;
     struct xen_heap_desc heap_desc;
 #ifdef CONFIG_SMP
@@ -422,11 +409,9 @@ void __init start_kernel(void)
      * for the actual xenheap.
      */
     max_page = efi_get_max_addr() >> PAGE_SHIFT;
-    while ((max_page >> 3) > xenheap_size - (XENHEAP_MEGABYTES_MIN << 20))
+    while ((max_page >> 3) > xenheap_size - XENHEAP_SIZE_MIN)
         xenheap_size <<= 1;
 
-    BUG_ON(xenheap_size > (XENHEAP_MEGABYTES_MAX << 20));
-
     xenheap_phys_end = xen_pstart + xenheap_size;
     printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
            xen_pstart, xenheap_phys_end);
@@ -731,8 +716,8 @@ void arch_get_xen_caps(xen_capabilities_
 
 int xen_in_range(paddr_t start, paddr_t end)
 {
-    start = max_t(paddr_t, start, xen_pstart);
-    end = min_t(paddr_t, end, xen_pstart + XENHEAP_DEFAULT_SIZE);
+    paddr_t xs = __pa(&_start);
+    paddr_t xe = __pa(&_end);
 
-    return start < end;
+    return (start < xe) && (end > xs);
 }
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -631,7 +631,7 @@ void __init scrub_heap_pages(void)
  * XEN-HEAP SUB-ALLOCATOR
  */
 
-#ifndef __x86_64__
+#if !defined(__x86_64__) && !defined(__ia64__)
 
 void init_xenheap_pages(paddr_t ps, paddr_t pe)
 {
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -86,7 +86,6 @@ typedef unsigned long paddr_t;
 // FIXME?: x86-ism used in xen/mm.h
 #define LOCK_PREFIX
 
-extern unsigned long xenheap_phys_end;
 extern unsigned long total_pages;
 extern unsigned long xen_pstart;
 extern unsigned long xenheap_size;
@@ -119,9 +118,6 @@ extern char _end[]; /* standard ELF symb
 // FIXME SMP: leave SMP for a later time
 ///////////////////////////////////////////////////////////////
 // xen/include/asm/config.h
-// Natural boundary upon TR size to define xenheap space
-#define XENHEAP_DEFAULT_MB (1 << (KERNEL_TR_PAGE_SHIFT - 20))
-#define XENHEAP_DEFAULT_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
 #define	ELFSIZE	64
 
 ///////////////////////////////////////////////////////////////
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -114,16 +114,18 @@ struct page_info
  /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated    PG_shift(1)
 #define PGC_allocated     PG_mask(1, 1)
- /* bit PG_shift(2) reserved. See asm-x86/mm.h */
+ /* Page is Xen heap? */
+# define _PGC_xen_heap    PG_shift(2)
+# define PGC_xen_heap     PG_mask(1, 2)
  /* bit PG_shift(3) reserved. See asm-x86/mm.h */
  /* PG_mask(7, 6) reserved. See asm-x86/mm.h*/
  /* Count of references to this frame. */
 #define PGC_count_width   PG_shift(6)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
-#define is_xen_heap_mfn(mfn)   (((mfn) < paddr_to_pfn(xenheap_phys_end)) \
-                                && ((mfn) >= paddr_to_pfn(xen_pstart)))
-#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
+#define is_xen_heap_page(page)  ((page)->count_info & PGC_xen_heap)
+#define is_xen_heap_mfn(mfn)    (mfn_valid(mfn) &&                      \
+                                 is_xen_heap_page(mfn_to_page(mfn)))
 
 #define page_get_owner(_p)      ((struct domain *)(_p)->u.inuse._domain)
 #define page_set_owner(_p, _d)	((_p)->u.inuse._domain = (unsigned long)(_d))
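
For reference, my reading of the new machine_kexec_get_xenheap() size: with
xenheap_phys_end gone, the reported range runs from the ELF-page-aligned end
of the image up to the next KERNEL_TR_PAGE_SIZE boundary (a full granule when
the start is already aligned).  A quick standalone check of that arithmetic,
with KERNEL_TR_PAGE_SIZE assumed to be a 16MB granule for the sake of the
example:

    /* Illustration only; the 16MB value is an assumption for this example. */
    #include <stdio.h>

    #define KERNEL_TR_PAGE_SIZE (1UL << 24)

    static unsigned long xenheap_range_size(unsigned long start)
    {
        /* Same expression as the patched machine_kexec_get_xenheap(). */
        return ((start + KERNEL_TR_PAGE_SIZE) & ~(KERNEL_TR_PAGE_SIZE - 1)) - start;
    }

    int main(void)
    {
        /* start on a granule boundary -> a full 16MB */
        printf("%lu MB\n", xenheap_range_size(64UL << 20) >> 20);
        /* start 4MB into a granule -> the remaining 12MB of that granule */
        printf("%lu MB\n", xenheap_range_size((64UL << 20) + (4UL << 20)) >> 20);
        return 0;
    }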