powerpc: Use LOAD_REG_IMMEDIATE macros

This patch changes LOAD_REG_ADDR macro calls, and the equivalent
open-coded @got loads, to LOAD_REG_IMMEDIATE. LOAD_REG_ADDR fetches
the address from the symbol's GOT/TOC entry at run time, whereas
LOAD_REG_IMMEDIATE constructs it in the register from immediate
operands, making sure that we load the correct address.

Signed-off-by: Mohan Kumar M <[EMAIL PROTECTED]>
---
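For reviewers, here is a sketch of what the two macros expand to,
paraphrased from the 64-bit definitions in
arch/powerpc/include/asm/ppc_asm.h (illustrative only; the exact
spelling in your tree may differ):

	/* Builds the 64-bit constant in the register from immediate
	 * operands, 16 bits at a time; no memory access is involved.
	 */
	#define LOAD_REG_IMMEDIATE(reg,expr)		\
		lis	reg,(expr)@highest;		\
		ori	reg,reg,(expr)@higher;		\
		rldicr	reg,reg,32,31;			\
		oris	reg,reg,(expr)@h;		\
		ori	reg,reg,(expr)@l

	/* A doubleword load from the symbol's GOT/TOC entry,
	 * relative to r2.
	 */
	#define LOAD_REG_ADDR(reg,name)			\
		ld	reg,name@got(r2)
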
 arch/powerpc/kernel/entry_64.S          |    4 ++--
 arch/powerpc/mm/hash_low_64.S           |    8 ++++----
 arch/powerpc/mm/slb_low.S               |    2 +-
 arch/powerpc/platforms/pseries/hvCall.S |    2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 12eb95a..37e5d95 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -714,7 +714,7 @@ _GLOBAL(enter_rtas)
         std	r6,PACASAVEDMSR(r13)
 
 	/* Setup our real return addr */	
-	LOAD_REG_ADDR(r4,.rtas_return_loc)
+	LOAD_REG_IMMEDIATE(r4,.rtas_return_loc)
 	clrldi	r4,r4,2			/* convert to realmode address */
        	mtlr	r4
 
@@ -730,7 +730,7 @@ _GLOBAL(enter_rtas)
 	sync				/* disable interrupts so SRR0/1 */
 	mtmsrd	r0			/* don't get trashed */
 
-	LOAD_REG_ADDR(r4, rtas)
+	LOAD_REG_IMMEDIATE(r4, rtas)
 	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
 	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
 	
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index a719f53..e9ba872 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -83,7 +83,7 @@ _GLOBAL(__hash_page_4K)
 	std	r29,STK_REG(r29)(r1)
 	std	r30,STK_REG(r30)(r1)
 	std	r31,STK_REG(r31)(r1)
-	
+
 	/* Step 1:
 	 *
 	 * Check permissions, atomically mark the linux PTE busy
@@ -168,7 +168,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	std	r3,STK_PARM(r4)(r1)
 
 	/* Get htab_hash_mask */
-	ld	r4,htab_hash_mask@got(2)
+	LOAD_REG_IMMEDIATE(r4, htab_hash_mask)
 	ld	r27,0(r4)	/* htab_hash_mask -> r27 */
 
 	/* Check if we may already be in the hashtable, in this case, we
@@ -461,7 +461,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	std	r3,STK_PARM(r4)(r1)
 
 	/* Get htab_hash_mask */
-	ld	r4,htab_hash_mask@got(2)
+	LOAD_REG_IMMEDIATE(r4, htab_hash_mask)
 	ld	r27,0(r4)	/* htab_hash_mask -> r27 */
 
 	/* Check if we may already be in the hashtable, in this case, we
@@ -792,7 +792,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	std	r3,STK_PARM(r4)(r1)
 
 	/* Get htab_hash_mask */
-	ld	r4,htab_hash_mask@got(2)
+	LOAD_REG_IMMEDIATE(r4, htab_hash_mask)
 	ld	r27,0(r4)	/* htab_hash_mask -> r27 */
 
 	/* Check if we may already be in the hashtable, in this case, we
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index bc44dc4..502099e 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -128,7 +128,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
 	/* Now get to the array and obtain the sllp
 	 */
 	ld	r11,PACATOC(r13)
-	ld	r11,mmu_psize_defs@got(r11)
+	LOAD_REG_IMMEDIATE(r11, mmu_psize_defs)
 	add	r11,r11,r9
 	ld	r11,MMUPSIZESLLP(r11)
 	ori	r11,r11,SLB_VSID_USER
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c1427b3..43c18f0 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -55,7 +55,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR);				\
 	/* calculate address of stat structure r4 = opcode */	\
 	srdi	r4,r4,2;		/* index into array */	\
 	mulli	r4,r4,HCALL_STAT_SIZE;				\
-	LOAD_REG_ADDR(r7, per_cpu__hcall_stats);		\
+	LOAD_REG_IMMEDIATE(r7, per_cpu__hcall_stats);		\
 	add	r4,r4,r7;					\
 	ld	r7,PACA_DATA_OFFSET(r13); /* per cpu offset */	\
 	add	r4,r4,r7;					\
-- 
1.5.4
