From: David Vrabel <[email protected]>

In xen_memory_setup(), if a page that is being released has a VA
mapping, this mapping must also be updated.  Otherwise, the page will
not be released completely -- it will still be referenced in Xen and
won't be freed until the mapping is removed, which prevents it from
being reallocated at a different PFN.

This was already being done for the ISA memory region in
xen_ident_map_ISA(), but a few pages were still missed on the many
systems that mark some pages just below the ISA memory region as
reserved in the e820 map.

This fixes errors such as:

(XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152
(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17)

Signed-off-by: David Vrabel <[email protected]>
Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
(cherry picked from commit 83d51ab473dddde7df858015070ed22b84ebe9a9)

Signed-off-by: Daniel Kiper <[email protected]>
Tested-by: Daniel Kiper <[email protected]>
Tested-by: Konrad Rzeszutek Wilk <[email protected]>
---
 arch/x86/xen/enlighten.c |    1 -
 arch/x86/xen/mmu.c       |   23 -----------------------
 arch/x86/xen/setup.c     |   41 ++++++++++++++++++++++++++++++++++-------
 arch/x86/xen/xen-ops.h   |    1 -
 4 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9598038..d0825fb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1390,7 +1390,6 @@ asmlinkage void __init xen_start_kernel(void)
 
        xen_raw_console_write("mapping kernel into physical memory\n");
        pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-       xen_ident_map_ISA();
 
        /* Allocate and initialize top and mid mfn levels for p2m structure */
        xen_build_mfn_list_list();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cf7fe36..6b5a93a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1953,29 +1953,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-       unsigned long pa;
-
-       /*
-        * If we're dom0, then linear map the ISA machine addresses into
-        * the kernel's address space.
-        */
-       if (!xen_initial_domain())
-               return;
-
-       xen_raw_printk("Xen: setup ISA identity maps\n");
-
-       for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-               pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-               if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-                       BUG();
-       }
-
-       xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
        pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 282e807..24d2522 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -147,6 +147,13 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 
        return len;
 }
+
+static unsigned long __init xen_release_chunk(unsigned long start,
+                                             unsigned long end)
+{
+       return xen_do_chunk(start, end, true);
+}
+
 static unsigned long __init xen_populate_chunk(
        const struct e820entry *list, size_t map_size,
        unsigned long max_pfn, unsigned long *last_pfn,
@@ -205,6 +212,29 @@ static unsigned long __init xen_populate_chunk(
        }
        return done;
 }
+
+static void __init xen_set_identity_and_release_chunk(
+       unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+       unsigned long *released, unsigned long *identity)
+{
+       unsigned long pfn;
+
+       /*
+        * If the PFNs are currently mapped, the VA mapping also needs
+        * to be updated to be 1:1.
+        */
+       for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+               (void)HYPERVISOR_update_va_mapping(
+                       (unsigned long)__va(pfn << PAGE_SHIFT),
+                       mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+       if (start_pfn < nr_pages)
+               *released += xen_release_chunk(
+                       start_pfn, min(end_pfn, nr_pages));
+
+       *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -234,14 +264,11 @@ static unsigned long __init xen_set_identity_and_release(
                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);
 
-                       if (start_pfn < end_pfn) {
-                               if (start_pfn < nr_pages)
-                                       released += xen_do_chunk(
-                                               start_pfn, min(end_pfn, nr_pages), true);
+                       if (start_pfn < end_pfn)
+                               xen_set_identity_and_release_chunk(
+                                       start_pfn, end_pfn, nr_pages,
+                                       &released, &identity);
 
-                               identity += set_phys_range_identity(
-                                       start_pfn, end_pfn);
-                       }
                        start = end;
                }
        }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b095739..506fa08 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 
-- 
1.7.10.4
