gcc/ChangeLog:
* config.gcc: Add riscv_crypto.h
* config/riscv/riscv_crypto.h: New file.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
* gcc.target/riscv/zknd64.c: Likewise.
* gcc.target/riscv/zkne32.c: Likewise.
* gcc.target/riscv/zkne64.c: Likewise.
* gcc.target/riscv/zknh-sha256-32.c: Likewise.
* gcc.target/riscv/zknh-sha256-64.c: Likewise.
* gcc.target/riscv/zknh-sha512-32.c: Likewise.
* gcc.target/riscv/zknh-sha512-64.c: Likewise.
* gcc.target/riscv/zksed32.c: Likewise.
* gcc.target/riscv/zksed64.c: Likewise.
* gcc.target/riscv/zksh32.c: Likewise.
* gcc.target/riscv/zksh64.c: Likewise.
---
gcc/config.gcc | 2 +-
gcc/config/riscv/riscv_crypto.h | 219 ++++++++++++++++++
gcc/testsuite/gcc.target/riscv/zknd32.c | 6 +-
gcc/testsuite/gcc.target/riscv/zknd64.c | 12 +-
gcc/testsuite/gcc.target/riscv/zkne32.c | 6 +-
gcc/testsuite/gcc.target/riscv/zkne64.c | 10 +-
.../gcc.target/riscv/zknh-sha256-32.c | 22 +-
.../gcc.target/riscv/zknh-sha256-64.c | 10 +-
.../gcc.target/riscv/zknh-sha512-32.c | 14 +-
.../gcc.target/riscv/zknh-sha512-64.c | 10 +-
gcc/testsuite/gcc.target/riscv/zksed32.c | 6 +-
gcc/testsuite/gcc.target/riscv/zksed64.c | 6 +-
gcc/testsuite/gcc.target/riscv/zksh32.c | 6 +-
gcc/testsuite/gcc.target/riscv/zksh64.c | 6 +-
14 files changed, 288 insertions(+), 47 deletions(-)
create mode 100644 gcc/config/riscv/riscv_crypto.h
diff --git a/gcc/config.gcc b/gcc/config.gcc
index b88591b6fd8..d67fe8b6a6f 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -548,7 +548,7 @@ riscv*)
extra_objs="${extra_objs} riscv-vector-builtins.o
riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
extra_objs="${extra_objs} thead.o riscv-target-attr.o"
d_target_objs="riscv-d.o"
- extra_headers="riscv_vector.h"
+ extra_headers="riscv_vector.h riscv_crypto.h"
target_gtfiles="$target_gtfiles
\$(srcdir)/config/riscv/riscv-vector-builtins.cc"
target_gtfiles="$target_gtfiles
\$(srcdir)/config/riscv/riscv-vector-builtins.h"
;;
diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
new file mode 100644
index 00000000000..149c1132e10
--- /dev/null
+++ b/gcc/config/riscv/riscv_crypto.h
@@ -0,0 +1,219 @@
+/* RISC-V 'K' Extension intrinsics include file.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef __RISCV_CRYPTO_H
+#define __RISCV_CRYPTO_H
+
+#include <stdint.h>
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__riscv_zknd)
+#if __riscv_xlen == 32
+#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
+#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64ds (uint64_t __x, uint64_t __y)
+{
+ return __builtin_riscv_aes64ds (__x, __y);
+}
I don't understand why some intrinsic functions are implemented as
macros expanding to builtins while others are implemented as static
inline wrappers around builtins.
Is there a particular reason for mixing the two styles?
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64dsm (uint64_t __x, uint64_t __y)
+{
+ return __builtin_riscv_aes64dsm (__x, __y);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64im (uint64_t __x)
+{
+ return __builtin_riscv_aes64im (__x);
+}
+#endif
+#endif // defined (__riscv_zknd)
+
+#if defined(__riscv_zkne)
+#if __riscv_xlen == 32
+#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
+#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64es (uint64_t __x, uint64_t __y)
+{
+ return __builtin_riscv_aes64es (__x, __y);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64esm (uint64_t __x, uint64_t __y)
+{
+ return __builtin_riscv_aes64esm (__x, __y);
+}
+#endif
+#endif // defined (__riscv_zknd)
Copy and paste mistake in the comment (should be "__riscv_zkne")
+
+#if defined(__riscv_zknd) || defined(__riscv_zkne)
+#if __riscv_xlen == 64
+#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
+{
+ return __builtin_riscv_aes64ks2 (__x, __y);
+}
+#endif
+#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
+
+#if defined(__riscv_zknh)
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sig0 (uint32_t __x)
+{
+ return __builtin_riscv_sha256sig0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sig1 (uint32_t __x)
+{
+ return __builtin_riscv_sha256sig1 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sum0 (uint32_t __x)
+{
+ return __builtin_riscv_sha256sum0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sum1 (uint32_t __x)
+{
+ return __builtin_riscv_sha256sum1 (__x);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sig0h (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sig0l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sig1h (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sig1l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sum0l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sum0r (__x, __y);
+}
Why sum0l and sum0r?
The specification says sum0h and sum0l.
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sum1l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
+{
+ return __builtin_riscv_sha512sum1r (__x, __y);
+}
Why sum1l and sum1r?
The specification says sum1h and sum1l.
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0 (uint64_t __x)
+{
+ return __builtin_riscv_sha512sig0 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1 (uint64_t __x)
+{
+ return __builtin_riscv_sha512sig1 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0 (uint64_t __x)
+{
+ return __builtin_riscv_sha512sum0 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1 (uint64_t __x)
+{
+ return __builtin_riscv_sha512sum1 (__x);
+}
+#endif
+#endif // defined (__riscv_zknh)
+
+#if defined(__riscv_zksh)
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sm3p0 (uint32_t __x)
+{
+ return __builtin_riscv_sm3p0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sm3p1 (uint32_t __x)
+{
+ return __builtin_riscv_sm3p1 (__x);
+}
+#endif // defined (__riscv_zksh)
+
+#if defined(__riscv_zksed)
+#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
+#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
+#endif // defined (__riscv_zksh)
Wrong comment (should be "__riscv_zksed").
Also note two further issues in this hunk: the __riscv_sm4ed and
__riscv_sm4ks macro definitions end with a stray trailing semicolon,
which will break uses in expression context (e.g. as a function
argument) — the semicolons should be dropped, matching the other
macro definitions earlier in the file. And the new header is missing
a newline at end of file (see the "\ No newline at end of file"
marker below).
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c
b/gcc/testsuite/gcc.target/riscv/zknd32.c
index e60c027e091..62b730a700f 100644
--- a/gcc/testsuite/gcc.target/riscv/zknd32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
{
- return __builtin_riscv_aes32dsi(rs1,rs2,bs);
+ return __riscv_aes32dsi(rs1,rs2,bs);
}
uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
{
- return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
+ return __riscv_aes32dsmi(rs1,rs2,bs);
}
/* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c
b/gcc/testsuite/gcc.target/riscv/zknd64.c
index 707418cd51e..e5f2be72bae 100644
--- a/gcc/testsuite/gcc.target/riscv/zknd64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
@@ -2,31 +2,31 @@
/* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint64_t foo1(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64ds(rs1,rs2);
+ return __riscv_aes64ds(rs1,rs2);
}
uint64_t foo2(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64dsm(rs1,rs2);
+ return __riscv_aes64dsm(rs1,rs2);
}
uint64_t foo3(uint64_t rs1, unsigned rnum)
{
- return __builtin_riscv_aes64ks1i(rs1,rnum);
+ return __riscv_aes64ks1i(rs1,rnum);
}
uint64_t foo4(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64ks2(rs1,rs2);
+ return __riscv_aes64ks2(rs1,rs2);
}
uint64_t foo5(uint64_t rs1)
{
- return __builtin_riscv_aes64im(rs1);
+ return __riscv_aes64im(rs1);
}
/* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c
b/gcc/testsuite/gcc.target/riscv/zkne32.c
index 252e9ffa43b..c3a7205a48b 100644
--- a/gcc/testsuite/gcc.target/riscv/zkne32.c
+++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_aes32esi(rs1, rs2, bs);
+ return __riscv_aes32esi(rs1, rs2, bs);
}
uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_aes32esmi(rs1, rs2, bs);
+ return __riscv_aes32esmi(rs1, rs2, bs);
}
/* { dg-final { scan-assembler-times "aes32esi" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c
b/gcc/testsuite/gcc.target/riscv/zkne64.c
index b25f6b5c29a..e99b21a46dd 100644
--- a/gcc/testsuite/gcc.target/riscv/zkne64.c
+++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
@@ -2,26 +2,26 @@
/* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint64_t foo1(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64es(rs1,rs2);
+ return __riscv_aes64es(rs1,rs2);
}
uint64_t foo2(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64esm(rs1,rs2);
+ return __riscv_aes64esm(rs1,rs2);
}
uint64_t foo3(uint64_t rs1, unsigned rnum)
{
- return __builtin_riscv_aes64ks1i(rs1,rnum);
+ return __riscv_aes64ks1i(rs1,rnum);
}
uint64_t foo4(uint64_t rs1, uint64_t rs2)
{
- return __builtin_riscv_aes64ks2(rs1,rs2);
+ return __riscv_aes64ks2(rs1,rs2);
}
/* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
index c51b143a8a5..96e967fba96 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
@@ -2,7 +2,27 @@
/* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include "zknh-sha256-64.c"
+#include "riscv_crypto.h"
+
+unsigned int foo1(unsigned int rs1)
+{
+ return __riscv_sha256sig0(rs1);
+}
+
+unsigned int foo2(unsigned int rs1)
+{
+ return __riscv_sha256sig1(rs1);
+}
+
+unsigned int foo3(unsigned int rs1)
+{
+ return __riscv_sha256sum0(rs1);
+}
+
+unsigned int foo4(unsigned int rs1)
+{
+ return __riscv_sha256sum1(rs1);
+}
/* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
/* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
index 2ef37601e6f..172b84421e2 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
@@ -2,24 +2,26 @@
/* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
+#include "riscv_crypto.h"
+
unsigned int foo1(unsigned int rs1)
{
- return __builtin_riscv_sha256sig0(rs1);
+ return __riscv_sha256sig0(rs1);
}
unsigned int foo2(unsigned int rs1)
{
- return __builtin_riscv_sha256sig1(rs1);
+ return __riscv_sha256sig1(rs1);
}
unsigned int foo3(unsigned int rs1)
{
- return __builtin_riscv_sha256sum0(rs1);
+ return __riscv_sha256sum0(rs1);
}
unsigned int foo4(unsigned int rs1)
{
- return __builtin_riscv_sha256sum1(rs1);
+ return __riscv_sha256sum1(rs1);
}
/* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
index f2bcae36a1f..e6fb298d6a7 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
@@ -2,36 +2,36 @@
/* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sig0h(rs1,rs2);
+ return __riscv_sha512sig0h(rs1,rs2);
}
uint32_t foo2(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sig0l(rs1,rs2);
+ return __riscv_sha512sig0l(rs1,rs2);
}
uint32_t foo3(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sig1h(rs1,rs2);
+ return __riscv_sha512sig1h(rs1,rs2);
}
uint32_t foo4(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sig1l(rs1,rs2);
+ return __riscv_sha512sig1l(rs1,rs2);
}
uint32_t foo5(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sum0r(rs1,rs2);
+ return __riscv_sha512sum0r(rs1,rs2);
}
uint32_t foo6(uint32_t rs1, uint32_t rs2)
{
- return __builtin_riscv_sha512sum1r(rs1,rs2);
+ return __riscv_sha512sum1r(rs1,rs2);
}
/* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
index 4f248575e66..c65c2043d08 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
@@ -2,26 +2,26 @@
/* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint64_t foo1(uint64_t rs1)
{
- return __builtin_riscv_sha512sig0(rs1);
+ return __riscv_sha512sig0(rs1);
}
uint64_t foo2(uint64_t rs1)
{
- return __builtin_riscv_sha512sig1(rs1);
+ return __riscv_sha512sig1(rs1);
}
uint64_t foo3(uint64_t rs1)
{
- return __builtin_riscv_sha512sum0(rs1);
+ return __riscv_sha512sum0(rs1);
}
uint64_t foo4(uint64_t rs1)
{
- return __builtin_riscv_sha512sum1(rs1);
+ return __riscv_sha512sum1(rs1);
}
diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c
b/gcc/testsuite/gcc.target/riscv/zksed32.c
index 0e8f01cd548..d63e0775391 100644
--- a/gcc/testsuite/gcc.target/riscv/zksed32.c
+++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_sm4ks(rs1,rs2,bs);
+ return __riscv_sm4ks(rs1,rs2,bs);
}
uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_sm4ed(rs1,rs2,bs);
+ return __riscv_sm4ed(rs1,rs2,bs);
}
diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c
b/gcc/testsuite/gcc.target/riscv/zksed64.c
index 9e4d1961419..426122cf6eb 100644
--- a/gcc/testsuite/gcc.target/riscv/zksed64.c
+++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_sm4ks(rs1,rs2,bs);
+ return __riscv_sm4ks(rs1,rs2,bs);
}
uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
{
- return __builtin_riscv_sm4ed(rs1,rs2,bs);
+ return __riscv_sm4ed(rs1,rs2,bs);
}
diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c
b/gcc/testsuite/gcc.target/riscv/zksh32.c
index c182e557a85..3d0d154ad1d 100644
--- a/gcc/testsuite/gcc.target/riscv/zksh32.c
+++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1)
{
- return __builtin_riscv_sm3p0(rs1);
+ return __riscv_sm3p0(rs1);
}
uint32_t foo2(uint32_t rs1)
{
- return __builtin_riscv_sm3p1(rs1);
+ return __riscv_sm3p1(rs1);
}
diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c
b/gcc/testsuite/gcc.target/riscv/zksh64.c
index d794b39f77a..1398c1329f0 100644
--- a/gcc/testsuite/gcc.target/riscv/zksh64.c
+++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
@@ -2,16 +2,16 @@
/* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
/* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
uint32_t foo1(uint32_t rs1)
{
- return __builtin_riscv_sm3p0(rs1);
+ return __riscv_sm3p0(rs1);
}
uint32_t foo2(uint32_t rs1)
{
- return __builtin_riscv_sm3p1(rs1);
+ return __riscv_sm3p1(rs1);
}
--
2.34.1