We should prefer accessing CSRs using their CSR numbers because:
1. It compiles fine with older toolchains.
2. We can use the latest CSR names from the RISC-V spec in the
   #define macros for the CSR numbers.
3. We can access newly added CSRs even if the toolchain does not
   recognize them by name (see the example below).
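
For example (an illustrative sketch, not part of this patch), a CSR
read written against these macros compiles down to the CSR number,
so the assembler never sees a symbolic CSR name:

  /*
   * Illustration only: CSR_SSTATUS is 0x100, and csr_read()
   * stringifies it via __ASM_STR(), so this expands to
   * "csrr %0, 0x100" whether or not the assembler knows the
   * name "sstatus".
   */
  unsigned long status = csr_read(CSR_SSTATUS);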

Signed-off-by: Anup Patel <anup.pa...@wdc.com>
---
 arch/riscv/include/asm/csr.h         | 15 ++++++++-------
 arch/riscv/include/asm/encoding.h    | 17 +++++++++++++++++
 arch/riscv/include/asm/irqflags.h    | 10 +++++-----
 arch/riscv/include/asm/mmu_context.h |  7 +------
 arch/riscv/kernel/entry.S            | 22 +++++++++++-----------
 arch/riscv/kernel/head.S             | 12 ++++++------
 arch/riscv/kernel/perf_event.c       |  4 ++--
 arch/riscv/kernel/smp.c              |  2 +-
 arch/riscv/kernel/traps.c            |  6 +++---
 arch/riscv/mm/fault.c                |  6 +-----
 10 files changed, 55 insertions(+), 46 deletions(-)

diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 8cf698e39463..6bf5652d3565 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -14,6 +14,7 @@
 #ifndef _ASM_RISCV_CSR_H
 #define _ASM_RISCV_CSR_H
 
+#include <asm/asm.h>
 #include <asm/encoding.h>
 
 #ifndef __ASSEMBLY__
@@ -21,7 +22,7 @@
 #define csr_swap(csr, val)                                     \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrw %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
@@ -30,7 +31,7 @@
 #define csr_read(csr)                                          \
 ({                                                             \
        register unsigned long __v;                             \
-       __asm__ __volatile__ ("csrr %0, " #csr                  \
+       __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)        \
                              : "=r" (__v) :                    \
                              : "memory");                      \
        __v;                                                    \
@@ -39,7 +40,7 @@
 #define csr_write(csr, val)                                    \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrw " #csr ", %0"               \
+       __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
@@ -47,7 +48,7 @@
 #define csr_read_set(csr, val)                                 \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrs %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
@@ -56,7 +57,7 @@
 #define csr_set(csr, val)                                      \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrs " #csr ", %0"               \
+       __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
@@ -64,7 +65,7 @@
 #define csr_read_clear(csr, val)                               \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrc %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
@@ -73,7 +74,7 @@
 #define csr_clear(csr, val)                                    \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrc " #csr ", %0"               \
+       __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
diff --git a/arch/riscv/include/asm/encoding.h b/arch/riscv/include/asm/encoding.h
index 4f187854fd8b..717b823ac110 100644
--- a/arch/riscv/include/asm/encoding.h
+++ b/arch/riscv/include/asm/encoding.h
@@ -76,4 +76,21 @@
 #define SIE_STIE               (_AC(0x1, UL) << IRQ_S_TIMER)
 #define SIE_SEIE               (_AC(0x1, UL) << IRQ_S_EXT)
 
+#define CSR_CYCLE              0xc00
+#define CSR_TIME               0xc01
+#define CSR_INSTRET            0xc02
+#define CSR_SSTATUS            0x100
+#define CSR_SIE                0x104
+#define CSR_STVEC              0x105
+#define CSR_SCOUNTEREN         0x106
+#define CSR_SSCRATCH           0x140
+#define CSR_SEPC               0x141
+#define CSR_SCAUSE             0x142
+#define CSR_STVAL              0x143
+#define CSR_SIP                0x144
+#define CSR_SATP               0x180
+#define CSR_CYCLEH             0xc80
+#define CSR_TIMEH              0xc81
+#define CSR_INSTRETH           0xc82
+
 #endif /* _ASM_RISCV_ENCODING_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 07a3c6d5706f..1a69b3bcd371 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -21,25 +21,25 @@
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-       return csr_read(sstatus);
+       return csr_read(CSR_SSTATUS);
 }
 
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-       csr_set(sstatus, SR_SIE);
+       csr_set(CSR_SSTATUS, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-       csr_clear(sstatus, SR_SIE);
+       csr_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-       return csr_read_clear(sstatus, SR_SIE);
+       return csr_read_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       csr_set(sstatus, flags & SR_SIE);
+       csr_set(CSR_SSTATUS, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 336d60ec5698..98c76c821367 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -83,12 +83,7 @@ static inline void switch_mm(struct mm_struct *prev,
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));
 
-               /*
-                * Use the old spbtr name instead of using the current satp
-                * name to support binutils 2.29 which doesn't know about the
-                * privileged ISA 1.10 yet.
-                */
-               csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+               csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
                local_flush_tlb_all();
 
                flush_icache_deferred(next);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fd9b57c8b4ce..1c1ecc238cfa 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -37,11 +37,11 @@
         * the kernel thread pointer.  If we came from the kernel, sscratch
         * will contain 0, and we should continue on the current TP.
         */
-       csrrw tp, sscratch, tp
+       csrrw tp, CSR_SSCRATCH, tp
        bnez tp, _save_context
 
 _restore_kernel_tpsp:
-       csrr tp, sscratch
+       csrr tp, CSR_SSCRATCH
        REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
        REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
        li t0, SR_SUM | SR_FS
 
        REG_L s0, TASK_TI_USER_SP(tp)
-       csrrc s1, sstatus, t0
-       csrr s2, sepc
-       csrr s3, sbadaddr
-       csrr s4, scause
-       csrr s5, sscratch
+       csrrc s1, CSR_SSTATUS, t0
+       csrr s2, CSR_SEPC
+       csrr s3, CSR_STVAL
+       csrr s4, CSR_SCAUSE
+       csrr s5, CSR_SSCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_SSTATUS(sp)
        REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
        .macro RESTORE_ALL
        REG_L a0, PT_SSTATUS(sp)
        REG_L a2, PT_SEPC(sp)
-       csrw sstatus, a0
-       csrw sepc, a2
+       csrw CSR_SSTATUS, a0
+       csrw CSR_SEPC, a2
 
        REG_L x1,  PT_RA(sp)
        REG_L x3,  PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
         * Set sscratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel
         */
-       csrw sscratch, x0
+       csrw CSR_SSCRATCH, x0
 
        /* Load the global pointer */
 .option push
@@ -248,7 +248,7 @@ resume_userspace:
         * Save TP into sscratch, so we can find the kernel data structures
         * again.
         */
-       csrw sscratch, tp
+       csrw CSR_SSCRATCH, tp
 
 restore_all:
        RESTORE_ALL
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index fe884cd69abd..041492636b45 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -23,7 +23,7 @@
 __INIT
 ENTRY(_start)
        /* Mask all interrupts */
-       csrw sie, zero
+       csrw CSR_SIE, zero
 
        /* Load the global pointer */
 .option push
@@ -89,7 +89,7 @@ relocate:
        /* Point stvec to virtual address of instruction after satp write */
        la a0, 1f
        add a0, a0, a1
-       csrw stvec, a0
+       csrw CSR_STVEC, a0
 
        /* Compute satp for kernel page tables, but don't load it yet */
        la a2, swapper_pg_dir
@@ -105,12 +105,12 @@ relocate:
        srl a0, a0, PAGE_SHIFT
        or a0, a0, a1
        sfence.vma
-       csrw sptbr, a0
+       csrw CSR_SATP, a0
 .align 2
 1:
        /* Set trap vector to spin forever to help debug */
        la a0, .Lsecondary_park
-       csrw stvec, a0
+       csrw CSR_STVEC, a0
 
        /* Reload the global pointer */
 .option push
@@ -119,7 +119,7 @@ relocate:
 .option pop
 
        /* Switch to kernel page tables */
-       csrw sptbr, a2
+       csrw CSR_SATP, a2
 
        ret
 
@@ -130,7 +130,7 @@ relocate:
 
        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
-       csrw stvec, a3
+       csrw CSR_STVEC, a3
 
        slli a3, a0, LGREG
        la a1, __cpu_up_stack_pointer
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index 667ee70defea..91626d9ae5f2 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
 
        switch (idx) {
        case RISCV_PMU_CYCLE:
-               val = csr_read(cycle);
+               val = csr_read(CSR_CYCLE);
                break;
        case RISCV_PMU_INSTRET:
-               val = csr_read(instret);
+               val = csr_read(CSR_INSTRET);
                break;
        default:
                WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0c41d07ec281..f244c63d29e4 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -89,7 +89,7 @@ void riscv_software_interrupt(void)
        unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
        /* Clear pending IPI */
-       csr_clear(sip, SIE_SSIE);
+       csr_clear(CSR_SIP, SIE_SSIE);
 
        while (true) {
                unsigned long ops;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 24a9333dda2c..1b407a9db3fc 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -159,9 +159,9 @@ void __init trap_init(void)
         * Set sup0 scratch register to 0, indicating to exception vector
         * that we are presently executing in the kernel
         */
-       csr_write(sscratch, 0);
+       csr_write(CSR_SSCRATCH, 0);
        /* Set the exception vector address */
-       csr_write(stvec, &handle_exception);
+       csr_write(CSR_STVEC, &handle_exception);
        /* Enable all interrupts */
-       csr_write(sie, -1);
+       csr_write(CSR_SIE, -1);
 }
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 88401d5125bc..26293bc053a8 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -239,13 +239,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
                 * Do _not_ use "tsk->active_mm->pgd" here.
                 * We might be inside an interrupt in the middle
                 * of a task switch.
-                *
-                * Note: Use the old spbtr name instead of using the current
-                * satp name to support binutils 2.29 which doesn't know about
-                * the privileged ISA 1.10 yet.
                 */
                index = pgd_index(addr);
-               pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+               pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                pgd_k = init_mm.pgd + index;
 
                if (!pgd_present(*pgd_k))
-- 
2.17.1
