Re: [ 02/79] sparc32: vm_area_struct access for old Sun SPARCs.

2013-07-27 Thread Ben Hutchings
On Fri, 2013-07-26 at 13:46 -0700, Greg Kroah-Hartman wrote:
> 3.10-stable review patch.  If anyone has any objections, please let me know.
> 
> --
> 
> From: Olivier DANET 
[...]

This is missing the upstream reference.  It was commit
961246b4ed8da3bcf4ee1eb9147f341013553e3c.
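For context, stable backports conventionally carry the mainline commit ID as the first
line of the changelog.  A hypothetical example of the missing reference line, plus one
way to check which mainline release first contained that commit (assuming a full
mainline clone), would be:

    commit 961246b4ed8da3bcf4ee1eb9147f341013553e3c upstream.

    $ git describe --contains 961246b4ed8da3bcf4ee1eb9147f341013553e3c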

Ben.

-- 
Ben Hutchings
Once a job is fouled up, anything done to improve it makes it worse.


[ 02/79] sparc32: vm_area_struct access for old Sun SPARCs.

2013-07-26 Thread Greg Kroah-Hartman
3.10-stable review patch.  If anyone has any objections, please let me know.

--

From: Olivier DANET 

Commit e4c6bfd2d79d063017ab19a18915f0bc759f32d9 ("mm: rearrange
vm_area_struct for fewer cache misses") changed the layout of the
vm_area_struct structure, it broke several SPARC32 assembly routines
which used numerical constants for accessing the vm_mm field.

This patch defines the VMA_VM_MM constant to replace the immediate values.
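
For background, VMA_VM_MM comes from the kernel's asm-offsets mechanism: offsetof()
evaluates the field offset at build time so the assembly can use a named constant
instead of a hard-coded immediate.  A minimal standalone sketch of that idea follows;
the mock types and file name are illustrative only, not the kernel's actual structures
or build machinery:

    /* offsets-sketch.c: illustrative only -- mock_vma stands in for
     * struct vm_area_struct; the real kernel generates asm-offsets.h
     * from arch/sparc/kernel/asm-offsets.c at build time. */
    #include <stdio.h>
    #include <stddef.h>

    struct mock_mm  { int context; };

    struct mock_vma {
            unsigned long vm_start;   /* any field added before vm_mm   */
            unsigned long vm_end;     /* moves it, which is what broke  */
            struct mock_mm *vm_mm;    /* the hard-coded 0x0 offsets     */
    };

    int main(void)
    {
            /* emit a header line that an assembly file could #include */
            printf("#define VMA_VM_MM 0x%zx\n",
                   offsetof(struct mock_vma, vm_mm));
            return 0;
    }

Built and run, this prints the computed offset (0x10 on a typical 64-bit host); the
assembly below then loads vma->vm_mm as "ld [%o0 + VMA_VM_MM], %o0" instead of
relying on the field staying at offset 0x0.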

Signed-off-by: Olivier DANET 
Signed-off-by: David S. Miller 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/sparc/kernel/asm-offsets.c |    2 ++
 arch/sparc/mm/hypersparc.S      |    8 ++++----
 arch/sparc/mm/swift.S           |    8 ++++----
 arch/sparc/mm/tsunami.S         |    6 +++---
 arch/sparc/mm/viking.S          |   10 +++++-----
 5 files changed, 18 insertions(+), 16 deletions(-)

--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -49,6 +49,8 @@ int foo(void)
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+   BLANK();
+   DEFINE(VMA_VM_MM,offsetof(struct vm_area_struct, vm_mm));
 
/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
return 0;
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
 
/* The things we do for performance... */
 hypersparc_flush_cache_range:
-   ld  [%o0 + 0x0], %o0/* XXX vma->vm_mm, GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
 #ifndef CONFIG_SMP
ld  [%o0 + AOFF_mm_context], %g1
cmp %g1, -1
@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
 */
/* Verified, my ass... */
 hypersparc_flush_cache_page:
-   ld  [%o0 + 0x0], %o0/* XXX vma->vm_mm, GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
ld  [%o0 + AOFF_mm_context], %g2
 #ifndef CONFIG_SMP
cmp %g2, -1
@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
 sta %g5, [%g1] ASI_M_MMUREGS
 
 hypersparc_flush_tlb_range:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld  [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
 sta %g5, [%g1] ASI_M_MMUREGS
 
 hypersparc_flush_tlb_page:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld  [%o0 + AOFF_mm_context], %o3
 andn %o1, (PAGE_SIZE - 1), %o1
--- a/arch/sparc/mm/swift.S
+++ b/arch/sparc/mm/swift.S
@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
 
.globl  swift_flush_cache_range
 swift_flush_cache_range:
-   ld  [%o0 + 0x0], %o0/* XXX vma->vm_mm, GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
sub %o2, %o1, %o2
sethi   %hi(4096), %o3
cmp %o2, %o3
@@ -116,7 +116,7 @@ swift_flush_cache_range:
 
.globl  swift_flush_cache_page
 swift_flush_cache_page:
-   ld  [%o0 + 0x0], %o0/* XXX vma->vm_mm, GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
 70:
ld  [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -219,7 +219,7 @@ swift_flush_sig_insns:
.globl  swift_flush_tlb_range
.globl  swift_flush_tlb_all
 swift_flush_tlb_range:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
 swift_flush_tlb_mm:
ld  [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
 
.globl  swift_flush_tlb_page
 swift_flush_tlb_page:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
mov SRMMU_CTX_REG, %g1
ld  [%o0 + AOFF_mm_context], %o3
 andn %o1, (PAGE_SIZE - 1), %o1
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -24,7 +24,7 @@
/* Sliiick... */
 tsunami_flush_cache_page:
 tsunami_flush_cache_range:
-   ld  [%o0 + 0x0], %o0/* XXX vma->vm_mm, GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
 tsunami_flush_cache_mm:
ld  [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
 
/* More slick stuff... */
 tsunami_flush_tlb_range:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
 tsunami_flush_tlb_mm:
ld  [%o0 + AOFF_mm_context], %g2
cmp %g2, -1
@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
 
/* This one can be done in a fine grained manner... */
 tsunami_flush_tlb_page:
-   ld  [%o0 + 0x00], %o0   /* XXX vma->vm_mm GROSS XXX */
+   ld  [%o0 + VMA_VM_MM], %o0
