---
 arch/x86/kernel/setup.c     |   29 ---------------------
 arch/x86/mm/init.c          |   59 --------------------------------------------
 arch/x86/platform/efi/efi.c |   15 +----------
 3 files changed, 2 insertions(+), 101 deletions(-)

Index: linux-yinghai/arch/x86/kernel/setup.c
===================================================================
--- linux-yinghai.orig/arch/x86/kernel/setup.c
+++ linux-yinghai/arch/x86/kernel/setup.c
@@ -921,35 +921,6 @@ void __init setup_arch(char **cmdline_p)
 
 	init_mem_mapping();
 
-<<<<<<< HEAD
-	/* max_pfn_mapped is updated here */
-	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-	max_pfn_mapped = max_low_pfn_mapped;
-
-#ifdef CONFIG_X86_64
-	if (max_pfn > max_low_pfn) {
-		int i;
-		unsigned long start, end;
-		unsigned long start_pfn, end_pfn;
-
-		for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
-							 NULL) {
-
-			end = PFN_PHYS(end_pfn);
-			if (end <= (1UL<<32))
-				continue;
-
-			start = PFN_PHYS(start_pfn);
-			max_pfn_mapped = init_memory_mapping(
-						max((1UL<<32), start), end);
-		}
-
-		/* can we preseve max_low_pfn ?*/
-		max_low_pfn = max_pfn;
-	}
-#endif
-=======
->>>>>>> for-x86-mm
 	memblock.current_limit = get_max_mapped();
 	dma_contiguous_reserve(0);
 
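Note on the setup.c hunk: the conflict is resolved toward the for-x86-mm side, so the
single init_mem_mapping() call kept above replaces the HEAD-side code that mapped low
memory first and then walked memblock to map each region above 4 GiB on its own. As a
reading aid, here is a minimal userspace sketch of the clamp-and-skip arithmetic the
deleted loop performed; it compiles standalone, the pfn ranges are made up, and
PFN_PHYS/PAGE_SHIFT are redefined locally rather than taken from kernel headers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PFN_PHYS(x)	((uint64_t)(x) << PAGE_SHIFT)

/* Illustrative stand-ins for the memblock ranges that
 * for_each_mem_pfn_range() walked in the deleted block. */
struct pfn_range {
	uint64_t start_pfn;
	uint64_t end_pfn;
};

int main(void)
{
	static const struct pfn_range ranges[] = {
		{ 0x00001, 0x0009f },	/* below 4 GiB: skipped      */
		{ 0x00100, 0xbffff },	/* below 4 GiB: skipped      */
		{ 0xf0000, 0x140000 },	/* straddles 4 GiB: clamped  */
		{ 0x180000, 0x1c0000 },	/* above 4 GiB: used as is   */
	};
	const uint64_t four_gib = 1ULL << 32;
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		uint64_t start = PFN_PHYS(ranges[i].start_pfn);
		uint64_t end = PFN_PHYS(ranges[i].end_pfn);

		/* Entirely below 4 GiB: already covered by the
		 * low mapping, so the old loop skipped it. */
		if (end <= four_gib)
			continue;

		/* The max((1UL<<32), start) clamp in the old code:
		 * map only the part above the 4 GiB boundary. */
		if (start < four_gib)
			start = four_gib;

		printf("would map [mem %#010llx-%#010llx]\n",
		       (unsigned long long)start,
		       (unsigned long long)end - 1);
	}
	return 0;
}

The clamp exists because init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT) had already
covered everything below 4 GiB, so only the high part of a straddling range was left.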
Index: linux-yinghai/arch/x86/mm/init.c
===================================================================
--- linux-yinghai.orig/arch/x86/mm/init.c
+++ linux-yinghai/arch/x86/mm/init.c
@@ -26,46 +26,6 @@ static unsigned long __initdata pgt_buf_
 static unsigned long min_pfn_mapped;
 
 /*
-<<<<<<< HEAD
- * First calculate space needed for kernel direct mapping page tables to cover
- * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
- * pages. Then find enough contiguous space for those page tables.
- */
-static void __init find_early_table_space(struct map_range *mr, int nr_range)
-{
-	int i;
-	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
-	unsigned long start = 0, good_end;
-	phys_addr_t base;
-
-	for (i = 0; i < nr_range; i++) {
-		unsigned long range, extra;
-
-		range = mr[i].end - mr[i].start;
-		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
-
-		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
-			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
-			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-		} else {
-			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
-		}
-
-		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
-			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
-#ifdef CONFIG_X86_32
-			extra += PMD_SIZE;
-#endif
-			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		} else {
-			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		}
-	}
-
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-=======
  * Pages returned are already directly mapped.
  *
  * Changing that is likely to break Xen, see commit
@@ -110,7 +70,6 @@ __ref void *alloc_low_pages(unsigned int
 
 	return __va(pfn << PAGE_SHIFT);
 }
->>>>>>> for-x86-mm
 
 /* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
 #define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
@@ -135,11 +94,6 @@ int direct_gbpages
 #endif
 ;
 
-<<<<<<< HEAD
-	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
-		(pgt_buf_top << PAGE_SHIFT) - 1);
-=======
 static void __init init_gbpages(void)
 {
 #ifdef CONFIG_X86_64
@@ -148,7 +102,6 @@ static void __init init_gbpages(void)
 	else
 		direct_gbpages = 0;
 #endif
->>>>>>> for-x86-mm
 }
 
 struct map_range {
@@ -340,17 +293,6 @@ static int __meminit split_mem_range(str
 			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
-<<<<<<< HEAD
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		find_early_table_space(mr, nr_range);
-=======
 	return nr_range;
 }
 
@@ -399,7 +341,6 @@ unsigned long __init_refok init_memory_m
 	memset(mr, 0, sizeof(mr));
 	nr_range = 0;
 	nr_range = split_mem_range(mr, nr_range, start, end);
->>>>>>> for-x86-mm
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
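Note on the init.c hunks: the resolution keeps the for-x86-mm allocator,
alloc_low_pages(), and drops the HEAD-only find_early_table_space() together with its
debug printk; page-table pages are now taken incrementally (seeded by
INIT_PGT_BUF_SIZE above) instead of being presized for the whole mapping. For
reference, this is the worst-case presizing arithmetic the deleted helper performed,
reduced to a single range on x86-64 with 2 MiB pages available and 1 GiB pages not. It
is a sketch, not the kernel function, and the 4 GiB input in main() is only an example:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PUD_SHIFT	30
#define PUD_SIZE	(1ULL << PUD_SHIFT)
#define ENTRY_SIZE	8	/* sizeof(pud_t/pmd_t/pte_t) on x86-64 */

static uint64_t roundup_page(uint64_t x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/*
 * Worst-case table bytes needed to map [start, end), assuming
 * 2 MiB pages are usable and 1 GiB pages are not -- the same
 * per-range arithmetic the deleted find_early_table_space() did.
 */
static uint64_t table_bytes(uint64_t start, uint64_t end)
{
	uint64_t range = end - start;
	uint64_t puds, pmds, ptes, extra;

	puds = (range + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (range + PMD_SIZE - 1) >> PMD_SHIFT;	/* no 1 GiB pages */

	/* With 2 MiB pages, only the sub-PMD tail needs 4 KiB PTEs. */
	extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
	ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return roundup_page(puds * ENTRY_SIZE) +
	       roundup_page(pmds * ENTRY_SIZE) +
	       roundup_page(ptes * ENTRY_SIZE);
}

int main(void)
{
	/* Mapping the first 4 GiB costs about 20 KiB of tables. */
	printf("%llu bytes\n",
	       (unsigned long long)table_bytes(0, 4ULL << 30));
	return 0;
}

Each level is rounded up to PAGE_SIZE because page tables are allocated in whole
4 KiB pages; the real helper additionally summed over all ranges and reserved extra
PTE slack on 32-bit.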
Index: linux-yinghai/arch/x86/platform/efi/efi.c
===================================================================
--- linux-yinghai.orig/arch/x86/platform/efi/efi.c
+++ linux-yinghai/arch/x86/platform/efi/efi.c
@@ -835,11 +835,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
-<<<<<<< HEAD
-	u64 end, systab, end_pfn;
-=======
-	u64 end, systab, addr, npages, start_pfn, end_pfn;
->>>>>>> for-x86-mm
+	u64 end, systab, start_pfn, end_pfn;
 	void *p, *va, *new_memmap = NULL;
 	int count = 0;
 
@@ -894,14 +890,7 @@ void __init efi_enter_virtual_mode(void)
 
 		start_pfn = PFN_DOWN(md->phys_addr);
 		end_pfn = PFN_UP(end);
-<<<<<<< HEAD
-		if (end_pfn <= max_low_pfn_mapped
-		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped)) {
-=======
-
-		if (pfn_range_is_mapped(start_pfn, end_pfn))
->>>>>>> for-x86-mm
+		if (pfn_range_is_mapped(start_pfn, end_pfn)) {
 			va = __va(md->phys_addr);
 
 			if (!(md->attribute & EFI_MEMORY_WB))

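Note on the efi.c hunks: both conflicted spans collapse to the for-x86-mm form.
pfn_range_is_mapped() is asked about the exact [start_pfn, end_pfn) range instead of
comparing end_pfn against two global watermarks, and the branch's addr/npages locals
are dropped along with the old test. A small standalone sketch of the deleted
condition (the watermark values are invented for illustration) shows what the old
test did and did not report as mapped:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative stand-ins for the kernel globals the deleted
 * HEAD-side test consulted; the values are made up. */
static const uint64_t max_low_pfn_mapped = 0xbffff;	/* ~3 GiB */
static const uint64_t max_pfn_mapped = 0x140000;	/* 5 GiB  */

/*
 * The deleted test: a region counted as mapped if it ended below
 * the low-memory watermark, or above 4 GiB but below the high
 * watermark. It never looked at where the region started.
 */
static bool old_is_mapped(uint64_t end_pfn)
{
	return end_pfn <= max_low_pfn_mapped ||
	       (end_pfn > (1ULL << (32 - PAGE_SHIFT)) &&
		end_pfn <= max_pfn_mapped);
}

int main(void)
{
	printf("ends at 2 GiB:    %d\n", old_is_mapped(0x80000));
	printf("ends at 3.25 GiB: %d\n", old_is_mapped(0xd0000));
	printf("ends at 5 GiB:    %d\n", old_is_mapped(0x140000));
	return 0;
}

The new helper takes both endpoints, which matters once the direct mapping is built
per-RAM-range by init_mem_mapping() rather than as one contiguous span up to a
watermark; a per-range query is the primitive that matches that model.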