[PATCH -v3 10/14] x86: Fixup code testing if a pfn is direct mapped

2012-09-04 Thread Yinghai Lu
From: Jacob Shin 

Update code that previously assumed pfns [ 0 - max_low_pfn_mapped ) and
[ 4GB - max_pfn_mapped ) were always direct mapped, to now look up
pfn_mapped ranges instead.


-v2: change applying sequence to keep git bisect working,
 so add a dummy pfn_range_is_mapped(). - Yinghai Lu
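
For reference (not part of the patch itself): the interim helper simply
re-encodes the old two-range assumption (pfns below max_low_pfn_mapped, or
between 4GB and max_pfn_mapped), so callers can be converted first and the
real pfn_mapped range lookup can land later in the series. A minimal sketch
of how a converted caller is expected to use it ("addr" and "size" are
hypothetical, and plain ioremap() stands in for whatever mapping primitive
the caller actually needs):

	unsigned long start_pfn = PFN_DOWN(addr);
	unsigned long end_pfn   = PFN_UP(addr + size);
	void *va;

	if (pfn_range_is_mapped(start_pfn, end_pfn))
		va = __va(addr);		/* covered by the direct mapping */
	else
		va = ioremap(addr, size);	/* not direct mapped, map it explicitly */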

Signed-off-by: Jacob Shin 
Signed-off-by: Yinghai Lu 
---
 arch/x86/include/asm/page_types.h |    8 ++++++++
 arch/x86/kernel/cpu/amd.c         |    8 +++-----
 arch/x86/platform/efi/efi.c       |    8 ++++----
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index e21fdd1..45aae6e 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,6 +51,14 @@ static inline phys_addr_t get_max_mapped(void)
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }
 
+static inline bool pfn_range_is_mapped(unsigned long start_pfn,
+   unsigned long end_pfn)
+{
+   return end_pfn <= max_low_pfn_mapped ||
+  (end_pfn > (1UL << (32 - PAGE_SHIFT)) &&
+   end_pfn <= max_pfn_mapped);
+}
+
 extern unsigned long init_memory_mapping(unsigned long start,
 unsigned long end);
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9d92e19..4235553 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -676,12 +676,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 * benefit in doing so.
 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+   unsigned long pfn = tseg >> PAGE_SHIFT;
+
printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-   if ((tseg>>PMD_SHIFT) <
-   (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
-   ((tseg>>PMD_SHIFT) <
-   (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
-   (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+   if (pfn_range_is_mapped(pfn, pfn + 1))
set_memory_4k((unsigned long)__va(tseg), 1);
}
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92660eda..f1facde 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -776,7 +776,7 @@ void __init efi_enter_virtual_mode(void)
efi_memory_desc_t *md, *prev_md = NULL;
efi_status_t status;
unsigned long size;
-   u64 end, systab, addr, npages, end_pfn;
+   u64 end, systab, addr, npages, start_pfn, end_pfn;
void *p, *va, *new_memmap = NULL;
int count = 0;
 
@@ -827,10 +827,10 @@ void __init efi_enter_virtual_mode(void)
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
 
+   start_pfn = PFN_DOWN(md->phys_addr);
end_pfn = PFN_UP(end);
-   if (end_pfn <= max_low_pfn_mapped
-   || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-   && end_pfn <= max_pfn_mapped))
+
+   if (pfn_range_is_mapped(start_pfn, end_pfn))
va = __va(md->phys_addr);
else
va = efi_ioremap(md->phys_addr, size, md->type);
-- 
1.7.7
