Re: [PATCH] [3/9] GBPAGES: Split LARGE_PAGE_SIZE/MASK into PUD_PAGE_SIZE/PMD_PAGE_SIZE
On Tue, 29 Jan 2008, Andi Kleen wrote: > Split the existing LARGE_PAGE_SIZE/MASK macro into two new macros > PUD_PAGE_SIZE/MASK and PMD_PAGE_SIZE/MASK. This is not splitting anything. It adds PUD_PAGE_xxx and renames LARGE_PAGE_XXX to PMD_PAGE_XXX. Please split the patch into one, which does the rename and one which adds the new defines. > > +/* Eventually 32bit should be moved over to the new names too */ > +#define LARGE_PAGE_SIZE PMD_PAGE_SIZE > +#define LARGE_PAGE_MASK PMD_PAGE_MASK Grep tells, that there is no remaining user of LARGE_PAGE_XXX, so this is useless. Even if there would be users we'd better fix them right away. Thanks, tglx -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH] [3/9] GBPAGES: Split LARGE_PAGE_SIZE/MASK into PUD_PAGE_SIZE/PMD_PAGE_SIZE
On Tue, 29 Jan 2008, Andi Kleen wrote: > Split the existing LARGE_PAGE_SIZE/MASK macro into two new macros > PUD_PAGE_SIZE/MASK and PMD_PAGE_SIZE/MASK. This is not splitting anything. It adds PUD_PAGE_xxx and renames LARGE_PAGE_XXX to PMD_PAGE_XXX. Please split the patch into one, which does the rename and one which adds the new defines. > > +/* Eventually 32bit should be moved over to the new names too */ > +#define LARGE_PAGE_SIZE PMD_PAGE_SIZE > +#define LARGE_PAGE_MASK PMD_PAGE_MASK Grep tells, that there is no remaining user of LARGE_PAGE_XXX, so this is useless. Even if there would be users we'd better fix them right away. Thanks, tglx -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
[PATCH] [3/9] GBPAGES: Split LARGE_PAGE_SIZE/MASK into PUD_PAGE_SIZE/PMD_PAGE_SIZE
Split the existing LARGE_PAGE_SIZE/MASK macro into two new macros PUD_PAGE_SIZE/MASK and PMD_PAGE_SIZE/MASK. Fix up all callers to use the new names. Signed-off-by: Andi Kleen <[EMAIL PROTECTED]> --- arch/x86/boot/compressed/head_64.S |8 ++++---- arch/x86/kernel/head_64.S |4 ++-- arch/x86/kernel/pci-gart_64.c |2 +- arch/x86/mm/init_64.c |6 +++--- arch/x86/mm/pageattr.c |2 +- include/asm-x86/page.h |4 ++-- include/asm-x86/page_32.h |4 ++++ include/asm-x86/page_64.h |3 +++ 8 files changed, 20 insertions(+), 13 deletions(-) Index: linux/include/asm-x86/page_64.h === --- linux.orig/include/asm-x86/page_64.h +++ linux/include/asm-x86/page_64.h @@ -23,6 +23,9 @@ #define MCE_STACK 5 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) + #define __PAGE_OFFSET _AC(0xffff810000000000, UL) #define __PHYSICAL_START CONFIG_PHYSICAL_START Index: linux/arch/x86/boot/compressed/head_64.S === --- linux.orig/arch/x86/boot/compressed/head_64.S +++ linux/arch/x86/boot/compressed/head_64.S @@ -80,8 +80,8 @@ startup_32: #ifdef CONFIG_RELOCATABLE movl%ebp, %ebx - addl$(LARGE_PAGE_SIZE -1), %ebx - andl$LARGE_PAGE_MASK, %ebx + addl$(PMD_PAGE_SIZE -1), %ebx + andl$PMD_PAGE_MASK, %ebx #else movl$CONFIG_PHYSICAL_START, %ebx #endif @@ -220,8 +220,8 @@ ENTRY(startup_64) /* Start with the delta to where the kernel will run at. 
*/ #ifdef CONFIG_RELOCATABLE leaqstartup_32(%rip) /* - $startup_32 */, %rbp - addq$(LARGE_PAGE_SIZE - 1), %rbp - andq$LARGE_PAGE_MASK, %rbp + addq$(PMD_PAGE_SIZE - 1), %rbp + andq$PMD_PAGE_MASK, %rbp movq%rbp, %rbx #else movq$CONFIG_PHYSICAL_START, %rbp Index: linux/arch/x86/kernel/pci-gart_64.c === --- linux.orig/arch/x86/kernel/pci-gart_64.c +++ linux/arch/x86/kernel/pci-gart_64.c @@ -501,7 +501,7 @@ static __init unsigned long check_iommu_ } a = aper + iommu_size; - iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a; + iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; if (iommu_size < 64*1024*1024) { printk(KERN_WARNING Index: linux/arch/x86/kernel/head_64.S === --- linux.orig/arch/x86/kernel/head_64.S +++ linux/arch/x86/kernel/head_64.S @@ -63,7 +63,7 @@ startup_64: /* Is the address not 2M aligned? */ movq%rbp, %rax - andl$~LARGE_PAGE_MASK, %eax + andl$~PMD_PAGE_MASK, %eax testl %eax, %eax jnz bad_address @@ -88,7 +88,7 @@ startup_64: /* Add an Identity mapping if I am above 1G */ leaq_text(%rip), %rdi - andq$LARGE_PAGE_MASK, %rdi + andq$PMD_PAGE_MASK, %rdi movq%rdi, %rax shrq$PUD_SHIFT, %rax Index: linux/arch/x86/mm/init_64.c === --- linux.orig/arch/x86/mm/init_64.c +++ linux/arch/x86/mm/init_64.c @@ -443,10 +443,10 @@ __clear_kernel_mapping(unsigned long add { unsigned long end = address + size; - BUG_ON(address & ~LARGE_PAGE_MASK); - BUG_ON(size & ~LARGE_PAGE_MASK); + BUG_ON(address & ~PMD_PAGE_MASK); + BUG_ON(size & ~PMD_PAGE_MASK); - for (; address < end; address += LARGE_PAGE_SIZE) { + for (; address < end; address += PMD_PAGE_SIZE) { pgd_t *pgd = pgd_offset_k(address); pud_t *pud; pmd_t *pmd; Index: linux/include/asm-x86/page_32.h === --- linux.orig/include/asm-x86/page_32.h +++ linux/include/asm-x86/page_32.h @@ -13,6 +13,10 @@ */ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +/* Eventually 32bit should be moved over to the new names too */ +#define LARGE_PAGE_SIZE PMD_PAGE_SIZE +#define LARGE_PAGE_MASK PMD_PAGE_MASK + #ifdef CONFIG_X86_PAE #define 
__PHYSICAL_MASK_SHIFT 36 #define __VIRTUAL_MASK_SHIFT 32 Index: linux/include/asm-x86/page.h === --- linux.orig/include/asm-x86/page.h +++ linux/include/asm-x86/page.h @@ -13,8 +13,8 @@ #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) -#define LARGE_PAGE_SIZE(_AC(1,UL) << PMD_SHIFT) -#define LARGE_PAGE_MASK(~(LARGE_PAGE_SIZE-1)) +#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_PAGE_MASK
[PATCH] [3/9] GBPAGES: Split LARGE_PAGE_SIZE/MASK into PUD_PAGE_SIZE/PMD_PAGE_SIZE
Split the existing LARGE_PAGE_SIZE/MASK macro into two new macros PUD_PAGE_SIZE/MASK and PMD_PAGE_SIZE/MASK. Fix up all callers to use the new names. Signed-off-by: Andi Kleen <[EMAIL PROTECTED]> --- arch/x86/boot/compressed/head_64.S |8 ++++---- arch/x86/kernel/head_64.S |4 ++-- arch/x86/kernel/pci-gart_64.c |2 +- arch/x86/mm/init_64.c |6 +++--- arch/x86/mm/pageattr.c |2 +- include/asm-x86/page.h |4 ++-- include/asm-x86/page_32.h |4 ++++ include/asm-x86/page_64.h |3 +++ 8 files changed, 20 insertions(+), 13 deletions(-) Index: linux/include/asm-x86/page_64.h === --- linux.orig/include/asm-x86/page_64.h +++ linux/include/asm-x86/page_64.h @@ -23,6 +23,9 @@ #define MCE_STACK 5 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) + #define __PAGE_OFFSET _AC(0xffff810000000000, UL) #define __PHYSICAL_START CONFIG_PHYSICAL_START Index: linux/arch/x86/boot/compressed/head_64.S === --- linux.orig/arch/x86/boot/compressed/head_64.S +++ linux/arch/x86/boot/compressed/head_64.S @@ -80,8 +80,8 @@ startup_32: #ifdef CONFIG_RELOCATABLE movl%ebp, %ebx - addl$(LARGE_PAGE_SIZE -1), %ebx - andl$LARGE_PAGE_MASK, %ebx + addl$(PMD_PAGE_SIZE -1), %ebx + andl$PMD_PAGE_MASK, %ebx #else movl$CONFIG_PHYSICAL_START, %ebx #endif @@ -220,8 +220,8 @@ ENTRY(startup_64) /* Start with the delta to where the kernel will run at. 
*/ #ifdef CONFIG_RELOCATABLE leaqstartup_32(%rip) /* - $startup_32 */, %rbp - addq$(LARGE_PAGE_SIZE - 1), %rbp - andq$LARGE_PAGE_MASK, %rbp + addq$(PMD_PAGE_SIZE - 1), %rbp + andq$PMD_PAGE_MASK, %rbp movq%rbp, %rbx #else movq$CONFIG_PHYSICAL_START, %rbp Index: linux/arch/x86/kernel/pci-gart_64.c === --- linux.orig/arch/x86/kernel/pci-gart_64.c +++ linux/arch/x86/kernel/pci-gart_64.c @@ -501,7 +501,7 @@ static __init unsigned long check_iommu_ } a = aper + iommu_size; - iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a; + iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; if (iommu_size < 64*1024*1024) { printk(KERN_WARNING Index: linux/arch/x86/kernel/head_64.S === --- linux.orig/arch/x86/kernel/head_64.S +++ linux/arch/x86/kernel/head_64.S @@ -63,7 +63,7 @@ startup_64: /* Is the address not 2M aligned? */ movq%rbp, %rax - andl$~LARGE_PAGE_MASK, %eax + andl$~PMD_PAGE_MASK, %eax testl %eax, %eax jnz bad_address @@ -88,7 +88,7 @@ startup_64: /* Add an Identity mapping if I am above 1G */ leaq_text(%rip), %rdi - andq$LARGE_PAGE_MASK, %rdi + andq$PMD_PAGE_MASK, %rdi movq%rdi, %rax shrq$PUD_SHIFT, %rax Index: linux/arch/x86/mm/init_64.c === --- linux.orig/arch/x86/mm/init_64.c +++ linux/arch/x86/mm/init_64.c @@ -443,10 +443,10 @@ __clear_kernel_mapping(unsigned long add { unsigned long end = address + size; - BUG_ON(address & ~LARGE_PAGE_MASK); - BUG_ON(size & ~LARGE_PAGE_MASK); + BUG_ON(address & ~PMD_PAGE_MASK); + BUG_ON(size & ~PMD_PAGE_MASK); - for (; address < end; address += LARGE_PAGE_SIZE) { + for (; address < end; address += PMD_PAGE_SIZE) { pgd_t *pgd = pgd_offset_k(address); pud_t *pud; pmd_t *pmd; Index: linux/include/asm-x86/page_32.h === --- linux.orig/include/asm-x86/page_32.h +++ linux/include/asm-x86/page_32.h @@ -13,6 +13,10 @@ */ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +/* Eventually 32bit should be moved over to the new names too */ +#define LARGE_PAGE_SIZE PMD_PAGE_SIZE +#define LARGE_PAGE_MASK PMD_PAGE_MASK + #ifdef CONFIG_X86_PAE #define __PHYSICAL_MASK_SHIFT 
36 #define __VIRTUAL_MASK_SHIFT 32 Index: linux/include/asm-x86/page.h === --- linux.orig/include/asm-x86/page.h +++ linux/include/asm-x86/page.h @@ -13,8 +13,8 @@ #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) -#define LARGE_PAGE_SIZE(_AC(1,UL) << PMD_SHIFT) -#define LARGE_PAGE_MASK(~(LARGE_PAGE_SIZE-1)) +#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) #define