From: "Guo Ren (Alibaba DAMO Academy)" <[email protected]>

The RV64ILP32 ABI runs on a 64-bit ISA but uses a 32-bit
BITS_PER_LONG, which makes "unsigned long" only 32 bits wide.
Switch the crc32 code from "unsigned long" to xlen_t, which
follows the machine XLEN, so the algorithm can still utilize
the full 64-bit data width and improve performance.

Signed-off-by: Guo Ren (Alibaba DAMO Academy) <[email protected]>
---
 arch/riscv/lib/crc32-riscv.c | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)
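
For context, a minimal sketch of the assumed xlen_t definition (an
illustration only, not part of this patch; xlen_t appears to be
provided by the newly included <asm/csr.h>). The point is that it
tracks the machine XLEN rather than BITS_PER_LONG:

#if __riscv_xlen == 64
typedef u64 xlen_t;	/* 64-bit registers even under an ILP32 ABI */
#elif __riscv_xlen == 32
typedef u32 xlen_t;
#else
# error "Unexpected __riscv_xlen"
#endif

With RV64ILP32, BITS_PER_LONG == 32 while __riscv_xlen == 64, so
"unsigned long" would narrow the clmul data path to 32 bits, whereas
xlen_t keeps the full 64-bit width.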

diff --git a/arch/riscv/lib/crc32-riscv.c b/arch/riscv/lib/crc32-riscv.c
index 53d56ab422c7..68dfb0565696 100644
--- a/arch/riscv/lib/crc32-riscv.c
+++ b/arch/riscv/lib/crc32-riscv.c
@@ -8,6 +8,7 @@
 #include <asm/hwcap.h>
 #include <asm/alternative-macros.h>
 #include <asm/byteorder.h>
+#include <asm/csr.h>
 
 #include <linux/types.h>
 #include <linux/minmax.h>
@@ -59,12 +60,12 @@
  */
 # define CRC32_POLY_QT_BE      0x04d101df481b4e5a
 
-static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr)
+static inline u64 crc32_le_prep(u32 crc, u64 const *ptr)
 {
        return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr);
 }
 
-static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
+static inline u32 crc32_le_zbc(u64 s, u32 poly, u64 poly_qt)
 {
        u32 crc;
 
@@ -85,7 +86,7 @@ static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
        return crc;
 }
 
-static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr)
+static inline u64 crc32_be_prep(u32 crc, u64 const *ptr)
 {
        return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr);
 }
@@ -131,7 +132,7 @@ static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr)
 # error "Unexpected __riscv_xlen"
 #endif
 
-static inline u32 crc32_be_zbc(unsigned long s)
+static inline u32 crc32_be_zbc(xlen_t s)
 {
        u32 crc;
 
@@ -156,16 +157,16 @@ typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len);
 
 static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
                                     size_t len, u32 poly,
-                                    unsigned long poly_qt)
+                                    xlen_t poly_qt)
 {
        size_t bits = len * 8;
-       unsigned long s = 0;
+       xlen_t s = 0;
        u32 crc_low = 0;
 
        for (int i = 0; i < len; i++)
-               s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8);
+               s = ((xlen_t)*p++ << (__riscv_xlen - 8)) | (s >> 8);
 
-       s ^= (unsigned long)crc << (__riscv_xlen - bits);
+       s ^= (xlen_t)crc << (__riscv_xlen - bits);
        if (__riscv_xlen == 32 || len < sizeof(u32))
                crc_low = crc >> bits;
 
@@ -177,12 +178,12 @@ static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
 
 static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
                                          size_t len, u32 poly,
-                                         unsigned long poly_qt,
+                                         xlen_t poly_qt,
                                          fallback crc_fb)
 {
        size_t offset, head_len, tail_len;
-       unsigned long const *p_ul;
-       unsigned long s;
+       xlen_t const *p_ul;
+       xlen_t s;
 
        asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                             RISCV_ISA_EXT_ZBC, 1)
@@ -199,7 +200,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
 
        tail_len = len & OFFSET_MASK;
        len = len >> STEP_ORDER;
-       p_ul = (unsigned long const *)p;
+       p_ul = (xlen_t const *)p;
 
        for (int i = 0; i < len; i++) {
                s = crc32_le_prep(crc, p_ul);
@@ -236,7 +237,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
                                     size_t len)
 {
        size_t bits = len * 8;
-       unsigned long s = 0;
+       xlen_t s = 0;
        u32 crc_low = 0;
 
        s = 0;
@@ -247,7 +248,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
                s ^= crc >> (32 - bits);
                crc_low = crc << bits;
        } else {
-               s ^= (unsigned long)crc << (bits - 32);
+               s ^= (xlen_t)crc << (bits - 32);
        }
 
        crc = crc32_be_zbc(s);
@@ -259,8 +260,8 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
 u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
 {
        size_t offset, head_len, tail_len;
-       unsigned long const *p_ul;
-       unsigned long s;
+       xlen_t const *p_ul;
+       xlen_t s;
 
        asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                             RISCV_ISA_EXT_ZBC, 1)
@@ -277,7 +278,7 @@ u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
 
        tail_len = len & OFFSET_MASK;
        len = len >> STEP_ORDER;
-       p_ul = (unsigned long const *)p;
+       p_ul = (xlen_t const *)p;
 
        for (int i = 0; i < len; i++) {
                s = crc32_be_prep(crc, p_ul);
-- 
2.40.1
