The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=56f5947a7102554729e0400f08b0f4d50a2d0827
commit 56f5947a7102554729e0400f08b0f4d50a2d0827
Author:     John Baldwin <j...@freebsd.org>
AuthorDate: 2022-04-12 17:05:45 +0000
Commit:     John Baldwin <j...@freebsd.org>
CommitDate: 2022-04-12 17:05:45 +0000

    Remove checks for __GNUCLIKE_ASM assuming it is always true.

    All supported compilers (modern versions of GCC and clang) support
    this.  Many places didn't have an #else so would just silently do
    the wrong thing.  Ancient versions of icc (the original motivation
    for this) are no longer a compiler FreeBSD supports.

    PR:             263102 (exp-run)
    Reviewed by:    brooks, imp
    Differential Revision:  https://reviews.freebsd.org/D34797
---
 lib/msun/src/math_private.h      |   6 +-
 sys/amd64/amd64/fpu.c            |  22 -----
 sys/amd64/include/atomic.h       |  43 ---------
 sys/amd64/include/cpufunc.h      |   6 +-
 sys/amd64/include/ieeefp.h       |  10 +-
 sys/amd64/include/pcpu.h         |   6 +-
 sys/amd64/include/profile.h      |   6 --
 sys/arm64/include/profile.h      |   4 -
 sys/crypto/via/padlock_cipher.c  |   2 -
 sys/crypto/via/padlock_hash.c    |   4 -
 sys/dev/random/nehemiah.c        |   2 -
 sys/dev/sound/pcm/feeder_rate.c  |   2 +-
 sys/i386/i386/in_cksum_machdep.c | 192 ---------------------------------------
 sys/i386/i386/npx.c              |  21 -----
 sys/i386/include/atomic.h        |  47 ----------
 sys/i386/include/cpufunc.h       |   6 +-
 sys/i386/include/ieeefp.h        |   4 -
 sys/i386/include/in_cksum.h      |   9 --
 sys/i386/include/pcpu.h          |   6 +-
 sys/i386/include/profile.h       |   6 --
 sys/x86/include/bus.h            |  42 ---------
 sys/x86/include/x86_ieeefp.h     |   4 -
 sys/x86/x86/identcpu.c           |   4 -
 23 files changed, 19 insertions(+), 435 deletions(-)

diff --git a/lib/msun/src/math_private.h b/lib/msun/src/math_private.h
index 20ce7bd88464..df526e71e545 100644
--- a/lib/msun/src/math_private.h
+++ b/lib/msun/src/math_private.h
@@ -644,7 +644,7 @@ rnintl(long double x)
  * return type provided their arg is a floating point integer. They can
  * sometimes be more efficient because no rounding is required.
  */
-#if (defined(amd64) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
+#if defined(amd64) || defined(__i386__)
 #define irint(x) \
     (sizeof(x) == sizeof(float) && \
     sizeof(__float_t) == sizeof(long double) ? irintf(x) : \
@@ -657,7 +657,7 @@

 #define i64rint(x) ((int64_t)(x)) /* only needed for ld128 so not opt. */

-#if defined(__i386__) && defined(__GNUCLIKE_ASM)
+#if defined(__i386__)
 static __inline int
 irintf(float x)
 {
@@ -677,7 +677,7 @@ irintd(double x)
 }
 #endif

-#if (defined(__amd64__) || defined(__i386__)) && defined(__GNUCLIKE_ASM)
+#if defined(__amd64__) || defined(__i386__)
 static __inline int
 irintl(long double x)
 {
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index 9b23cc5773a3..0bbf1cf047c0 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -69,8 +69,6 @@ __FBSDID("$FreeBSD$");
  * Floating point support.
*/ -#if defined(__GNUCLIKE_ASM) && !defined(lint) - #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw)) #define fnclex() __asm __volatile("fnclex") #define fninit() __asm __volatile("fninit") @@ -145,26 +143,6 @@ xsaveopt64(char *addr, uint64_t mask) "memory"); } -#else /* !(__GNUCLIKE_ASM && !lint) */ - -void fldcw(u_short cw); -void fnclex(void); -void fninit(void); -void fnstcw(caddr_t addr); -void fnstsw(caddr_t addr); -void fxsave(caddr_t addr); -void fxrstor(caddr_t addr); -void ldmxcsr(u_int csr); -void stmxcsr(u_int *csr); -void xrstor32(char *addr, uint64_t mask); -void xrstor64(char *addr, uint64_t mask); -void xsave32(char *addr, uint64_t mask); -void xsave64(char *addr, uint64_t mask); -void xsaveopt32(char *addr, uint64_t mask); -void xsaveopt64(char *addr, uint64_t mask); - -#endif /* __GNUCLIKE_ASM && !lint */ - #define start_emulating() load_cr0(rcr0() | CR0_TS) #define stop_emulating() clts() diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h index d61fb359e261..8a7bab8fee2d 100644 --- a/sys/amd64/include/atomic.h +++ b/sys/amd64/include/atomic.h @@ -102,38 +102,6 @@ * atomic_readandclear_long(P) (return (*(u_long *)(P)); *(u_long *)(P) = 0;) */ -#if !defined(__GNUCLIKE_ASM) -#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \ -void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \ -void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v) - -int atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src); -int atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src); -int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src); -int atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src); -int atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src); -int atomic_fcmpset_short(volatile u_short *dst, u_short *expect, - u_short src); -int atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src); -int atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src); -u_int atomic_fetchadd_int(volatile u_int *p, u_int v); -u_long atomic_fetchadd_long(volatile u_long *p, u_long v); -int atomic_testandset_int(volatile u_int *p, u_int v); -int atomic_testandset_long(volatile u_long *p, u_int v); -int atomic_testandclear_int(volatile u_int *p, u_int v); -int atomic_testandclear_long(volatile u_long *p, u_int v); -void atomic_thread_fence_acq(void); -void atomic_thread_fence_acq_rel(void); -void atomic_thread_fence_rel(void); -void atomic_thread_fence_seq_cst(void); - -#define ATOMIC_LOAD(TYPE) \ -u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p) -#define ATOMIC_STORE(TYPE) \ -void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) - -#else /* !__GNUCLIKE_ASM */ - /* * Always use lock prefixes. The result is slighly less optimal for * UP systems, but it matters less now, and sometimes UP is emulated @@ -385,8 +353,6 @@ atomic_thread_fence_seq_cst(void) __storeload_barrier(); } -#endif /* !__GNUCLIKE_ASM */ - ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v); ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v); ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v); @@ -423,8 +389,6 @@ ATOMIC_LOADSTORE(long); #ifndef WANT_FUNCTIONS /* Read the current value and store a new value in the destination. 
*/ -#ifdef __GNUCLIKE_ASM - static __inline u_int atomic_swap_int(volatile u_int *p, u_int v) { @@ -449,13 +413,6 @@ atomic_swap_long(volatile u_long *p, u_long v) return (v); } -#else /* !__GNUCLIKE_ASM */ - -u_int atomic_swap_int(volatile u_int *p, u_int v); -u_long atomic_swap_long(volatile u_long *p, u_long v); - -#endif /* __GNUCLIKE_ASM */ - #define atomic_set_acq_char atomic_set_barr_char #define atomic_set_rel_char atomic_set_barr_char #define atomic_clear_acq_char atomic_clear_barr_char diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h index bca74d8ead67..f639c9c7119e 100644 --- a/sys/amd64/include/cpufunc.h +++ b/sys/amd64/include/cpufunc.h @@ -57,7 +57,7 @@ struct region_descriptor; #define writel(va, d) (*(volatile uint32_t *) (va) = (d)) #define writeq(va, d) (*(volatile uint64_t *) (va) = (d)) -#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE) +#if defined(__CC_SUPPORTS___INLINE) static __inline void breakpoint(void) @@ -964,7 +964,7 @@ sgx_eremove(void *epc) return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0)); } -#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */ +#else /* !__CC_SUPPORTS___INLINE */ int breakpoint(void); u_int bsfl(u_int mask); @@ -1029,7 +1029,7 @@ void wbinvd(void); void write_rflags(u_int rf); void wrmsr(u_int msr, uint64_t newval); -#endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */ +#endif /* __CC_SUPPORTS___INLINE */ void reset_dbregs(void); diff --git a/sys/amd64/include/ieeefp.h b/sys/amd64/include/ieeefp.h index 96ee7e9040d8..48d879f0b80b 100644 --- a/sys/amd64/include/ieeefp.h +++ b/sys/amd64/include/ieeefp.h @@ -67,8 +67,6 @@ #define SSE_RND_OFF 13 /* rounding control offset */ #define SSE_FZ_OFF 15 /* flush to zero offset */ -#ifdef __GNUCLIKE_ASM - /* * General notes about conflicting SSE vs FP status bits. * This code assumes that software will not fiddle with the control @@ -184,9 +182,7 @@ __fpgetsticky(void) return ((fp_except_t)_ex); } -#endif /* __GNUCLIKE_ASM */ - -#if !defined(__IEEEFP_NOINLINES__) && defined(__GNUCLIKE_ASM) +#if !defined(__IEEEFP_NOINLINES__) #define fpgetmask() __fpgetmask() #define fpgetprec() __fpgetprec() @@ -196,7 +192,7 @@ __fpgetsticky(void) #define fpsetprec(m) __fpsetprec(m) #define fpsetround(m) __fpsetround(m) -#else /* !(!__IEEEFP_NOINLINES__ && __GNUCLIKE_ASM) */ +#else /* __IEEEFP_NOINLINES__ */ /* Augment the userland declarations. */ __BEGIN_DECLS @@ -210,6 +206,6 @@ fp_prec_t fpgetprec(void); fp_prec_t fpsetprec(fp_prec_t); __END_DECLS -#endif /* !__IEEEFP_NOINLINES__ && __GNUCLIKE_ASM */ +#endif /* !__IEEEFP_NOINLINES__ */ #endif /* !_MACHINE_IEEEFP_H_ */ diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h index dc99d4249bd2..ad2b6216ed47 100644 --- a/sys/amd64/include/pcpu.h +++ b/sys/amd64/include/pcpu.h @@ -109,7 +109,7 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line"); #define MONITOR_STOPSTATE_RUNNING 0 #define MONITOR_STOPSTATE_STOPPED 1 -#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) +#if defined(__GNUCLIKE___TYPEOF) /* * Evaluates to the byte offset of the per-cpu variable name. 
@@ -277,11 +277,11 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line"); } \ } while (0); -#else /* !__GNUCLIKE_ASM || !__GNUCLIKE___TYPEOF */ +#else /* !__GNUCLIKE___TYPEOF */ #error "this file needs to be ported to your compiler" -#endif /* __GNUCLIKE_ASM && __GNUCLIKE___TYPEOF */ +#endif /* __GNUCLIKE___TYPEOF */ #endif /* _KERNEL */ diff --git a/sys/amd64/include/profile.h b/sys/amd64/include/profile.h index b0fb469f5354..e86fd582b407 100644 --- a/sys/amd64/include/profile.h +++ b/sys/amd64/include/profile.h @@ -45,7 +45,6 @@ static void _mcount(uintfptr_t frompc, uintfptr_t selfpc) __used; \ static void _mcount -#ifdef __GNUCLIKE_ASM #define MCOUNT __asm(" \n\ .text \n\ .p2align 4,0x90 \n\ @@ -101,9 +100,6 @@ mcount() \ _mcount(frompc, selfpc); \ } #endif -#else /* !__GNUCLIKE_ASM */ -#define MCOUNT -#endif /* __GNUCLIKE_ASM */ typedef u_long uintfptr_t; @@ -114,9 +110,7 @@ typedef u_long uintfptr_t; typedef u_long fptrdiff_t; __BEGIN_DECLS -#ifdef __GNUCLIKE_ASM void mcount(void) __asm(".mcount"); -#endif __END_DECLS #endif /* !_KERNEL */ diff --git a/sys/arm64/include/profile.h b/sys/arm64/include/profile.h index f5f5caa1faa3..f898e2707d65 100644 --- a/sys/arm64/include/profile.h +++ b/sys/arm64/include/profile.h @@ -48,7 +48,6 @@ typedef __uintfptr_t uintfptr_t; static void _mcount(uintfptr_t frompc, uintfptr_t selfpc) __used; \ static void _mcount -#ifdef __GNUCLIKE_ASM /* * Call into _mcount. On arm64 the .mcount is a function so callers will * handle caller saved registers. As we don't directly touch any callee @@ -86,9 +85,6 @@ mcount(uintfptr_t frompc) _mcount(frompc, __builtin_return_address(0)); } #endif -#else -#define MCOUNT -#endif #endif /* !_KERNEL */ diff --git a/sys/crypto/via/padlock_cipher.c b/sys/crypto/via/padlock_cipher.c index 0bc831f07c40..c5cd5161f4b2 100644 --- a/sys/crypto/via/padlock_cipher.c +++ b/sys/crypto/via/padlock_cipher.c @@ -83,7 +83,6 @@ static __inline void padlock_cbc(void *in, void *out, size_t count, void *key, union padlock_cw *cw, void *iv) { -#ifdef __GNUCLIKE_ASM /* The .byte line is really VIA C3 "xcrypt-cbc" instruction */ __asm __volatile( "pushf \n\t" @@ -94,7 +93,6 @@ padlock_cbc(void *in, void *out, size_t count, void *key, union padlock_cw *cw, : "b" (key), "d" (cw) : "cc", "memory" ); -#endif } static void diff --git a/sys/crypto/via/padlock_hash.c b/sys/crypto/via/padlock_hash.c index f09024af4ed5..f834e677fc98 100644 --- a/sys/crypto/via/padlock_hash.c +++ b/sys/crypto/via/padlock_hash.c @@ -124,13 +124,11 @@ padlock_do_sha1(const u_char *in, u_char *out, int count) ((uint32_t *)result)[3] = 0x10325476; ((uint32_t *)result)[4] = 0xC3D2E1F0; -#ifdef __GNUCLIKE_ASM __asm __volatile( ".byte 0xf3, 0x0f, 0xa6, 0xc8" /* rep xsha1 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0) ); -#endif padlock_output_block((uint32_t *)result, (uint32_t *)out, SHA1_HASH_LEN / sizeof(uint32_t)); @@ -151,13 +149,11 @@ padlock_do_sha256(const char *in, char *out, int count) ((uint32_t *)result)[6] = 0x1F83D9AB; ((uint32_t *)result)[7] = 0x5BE0CD19; -#ifdef __GNUCLIKE_ASM __asm __volatile( ".byte 0xf3, 0x0f, 0xa6, 0xd0" /* rep xsha256 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0) ); -#endif padlock_output_block((uint32_t *)result, (uint32_t *)out, SHA2_256_HASH_LEN / sizeof(uint32_t)); diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c index 3ad18005c935..6381b9357176 100644 --- a/sys/dev/random/nehemiah.c +++ b/sys/dev/random/nehemiah.c @@ -65,7 +65,6 @@ VIA_RNG_store(void *buf) uint32_t retval = 0; uint32_t rate = 0; 
-#ifdef __GNUCLIKE_ASM __asm __volatile( "movl $0,%%edx\n\t" ".byte 0x0f, 0xa7, 0xc0" @@ -73,7 +72,6 @@ VIA_RNG_store(void *buf) : : "memory" ); -#endif if (rate == 0) return (retval&0x1f); return (0); diff --git a/sys/dev/sound/pcm/feeder_rate.c b/sys/dev/sound/pcm/feeder_rate.c index f6bb1836089b..b4b7dc7401e0 100644 --- a/sys/dev/sound/pcm/feeder_rate.c +++ b/sys/dev/sound/pcm/feeder_rate.c @@ -310,7 +310,7 @@ SYSCTL_PROC(_hw_snd, OID_AUTO, feeder_rate_quality, */ #define _Z_GCAST(x) ((uint64_t)(x)) -#if defined(__GNUCLIKE_ASM) && defined(__i386__) +#if defined(__i386__) /* * This is where i386 being beaten to a pulp. Fortunately this function is * rarely being called and if it is, it will decide the best (hopefully) diff --git a/sys/i386/i386/in_cksum_machdep.c b/sys/i386/i386/in_cksum_machdep.c index ba7cd170286b..49255a3c99ec 100644 --- a/sys/i386/i386/in_cksum_machdep.c +++ b/sys/i386/i386/in_cksum_machdep.c @@ -56,199 +56,8 @@ __FBSDID("$FreeBSD$"); #undef ADDCARRY #define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff -/* - * icc needs to be special cased here, as the asm code below results - * in broken code if compiled with icc. - */ -#if !defined(__GNUCLIKE_ASM) -/* non gcc parts stolen from sys/alpha/alpha/in_cksum.c */ -#define REDUCE32 \ - { \ - q_util.q = sum; \ - sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \ - } -#define REDUCE16 \ - { \ - q_util.q = sum; \ - l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \ - sum = l_util.s[0] + l_util.s[1]; \ - ADDCARRY(sum); \ - } -#endif #define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);} -#if !defined(__GNUCLIKE_ASM) -static const u_int32_t in_masks[] = { - /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/ - 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */ - 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */ - 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */ - 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */ -}; - -union l_util { - u_int16_t s[2]; - u_int32_t l; -}; -union q_util { - u_int16_t s[4]; - u_int32_t l[2]; - u_int64_t q; -}; - -static u_int64_t -in_cksumdata(const u_int32_t *lw, int len) -{ - u_int64_t sum = 0; - u_int64_t prefilled; - int offset; - union q_util q_util; - - if ((3 & (long) lw) == 0 && len == 20) { - sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4]; - REDUCE32; - return sum; - } - - if ((offset = 3 & (long) lw) != 0) { - const u_int32_t *masks = in_masks + (offset << 2); - lw = (u_int32_t *) (((long) lw) - offset); - sum = *lw++ & masks[len >= 3 ? 3 : len]; - len -= 4 - offset; - if (len <= 0) { - REDUCE32; - return sum; - } - } -#if 0 - /* - * Force to cache line boundary. - */ - offset = 32 - (0x1f & (long) lw); - if (offset < 32 && len > offset) { - len -= offset; - if (4 & offset) { - sum += (u_int64_t) lw[0]; - lw += 1; - } - if (8 & offset) { - sum += (u_int64_t) lw[0] + lw[1]; - lw += 2; - } - if (16 & offset) { - sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3]; - lw += 4; - } - } -#endif - /* - * access prefilling to start load of next cache line. - * then add current cache line - * save result of prefilling for loop iteration. 
- */ - prefilled = lw[0]; - while ((len -= 32) >= 4) { - u_int64_t prefilling = lw[8]; - sum += prefilled + lw[1] + lw[2] + lw[3] - + lw[4] + lw[5] + lw[6] + lw[7]; - lw += 8; - prefilled = prefilling; - } - if (len >= 0) { - sum += prefilled + lw[1] + lw[2] + lw[3] - + lw[4] + lw[5] + lw[6] + lw[7]; - lw += 8; - } else { - len += 32; - } - while ((len -= 16) >= 0) { - sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3]; - lw += 4; - } - len += 16; - while ((len -= 4) >= 0) { - sum += (u_int64_t) *lw++; - } - len += 4; - if (len > 0) - sum += (u_int64_t) (in_masks[len] & *lw); - REDUCE32; - return sum; -} - -u_short -in_addword(u_short a, u_short b) -{ - u_int64_t sum = a + b; - - ADDCARRY(sum); - return (sum); -} - -u_short -in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c) -{ - u_int64_t sum; - union q_util q_util; - union l_util l_util; - - sum = (u_int64_t) a + b + c; - REDUCE16; - return (sum); -} - -u_short -in_cksum_skip(struct mbuf *m, int len, int skip) -{ - u_int64_t sum = 0; - int mlen = 0; - int clen = 0; - caddr_t addr; - union q_util q_util; - union l_util l_util; - - len -= skip; - for (; skip && m; m = m->m_next) { - if (m->m_len > skip) { - mlen = m->m_len - skip; - addr = mtod(m, caddr_t) + skip; - goto skip_start; - } else { - skip -= m->m_len; - } - } - - for (; m && len; m = m->m_next) { - if (m->m_len == 0) - continue; - mlen = m->m_len; - addr = mtod(m, caddr_t); -skip_start: - if (len < mlen) - mlen = len; - if ((clen ^ (long) addr) & 1) - sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8; - else - sum += in_cksumdata((const u_int32_t *)addr, mlen); - - clen += mlen; - len -= mlen; - } - REDUCE16; - return (~sum & 0xffff); -} - -u_int in_cksum_hdr(const struct ip *ip) -{ - u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip)); - union q_util q_util; - union l_util l_util; - - REDUCE16; - return (~sum & 0xffff); -} -#else - /* * These asm statements require __volatile because they pass information * via the condition codes. GCC does not currently provide a way to specify @@ -490,4 +299,3 @@ skip_start: REDUCE; return (~sum & 0xffff); } -#endif diff --git a/sys/i386/i386/npx.c b/sys/i386/i386/npx.c index dc04d84b0eb5..307099925f23 100644 --- a/sys/i386/i386/npx.c +++ b/sys/i386/i386/npx.c @@ -79,8 +79,6 @@ __FBSDID("$FreeBSD$"); * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. 
*/ -#if defined(__GNUCLIKE_ASM) && !defined(lint) - #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw)) #define fnclex() __asm __volatile("fnclex") #define fninit() __asm __volatile("fninit") @@ -126,25 +124,6 @@ xsaveopt(char *addr, uint64_t mask) __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) : "memory"); } -#else /* !(__GNUCLIKE_ASM && !lint) */ - -void fldcw(u_short cw); -void fnclex(void); -void fninit(void); -void fnsave(caddr_t addr); -void fnstcw(caddr_t addr); -void fnstsw(caddr_t addr); -void fp_divide_by_0(void); -void frstor(caddr_t addr); -void fxsave(caddr_t addr); -void fxrstor(caddr_t addr); -void ldmxcsr(u_int csr); -void stmxcsr(u_int *csr); -void xrstor(char *addr, uint64_t mask); -void xsave(char *addr, uint64_t mask); -void xsaveopt(char *addr, uint64_t mask); - -#endif /* __GNUCLIKE_ASM && !lint */ #define start_emulating() load_cr0(rcr0() | CR0_TS) #define stop_emulating() clts() diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h index 154144a470c6..af6d323e0396 100644 --- a/sys/i386/include/atomic.h +++ b/sys/i386/include/atomic.h @@ -96,42 +96,6 @@ __mbu(void) * atomic_readandclear_long(P) (return (*(u_long *)(P)); *(u_long *)(P) = 0;) */ -#if !defined(__GNUCLIKE_ASM) -#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \ -void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \ -void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v) - -int atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src); -int atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src); -int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src); -int atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src); -int atomic_fcmpset_short(volatile u_short *dst, u_short *expect, - u_short src); -int atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src); -u_int atomic_fetchadd_int(volatile u_int *p, u_int v); -int atomic_testandset_int(volatile u_int *p, u_int v); -int atomic_testandclear_int(volatile u_int *p, u_int v); -void atomic_thread_fence_acq(void); -void atomic_thread_fence_acq_rel(void); -void atomic_thread_fence_rel(void); -void atomic_thread_fence_seq_cst(void); - -#define ATOMIC_LOAD(TYPE) \ -u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p) -#define ATOMIC_STORE(TYPE) \ -void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) - -int atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t); -int atomic_fcmpset_64(volatile uint64_t *, uint64_t *, uint64_t); -uint64_t atomic_load_acq_64(volatile uint64_t *); -void atomic_store_rel_64(volatile uint64_t *, uint64_t); -uint64_t atomic_swap_64(volatile uint64_t *, uint64_t); -uint64_t atomic_fetchadd_64(volatile uint64_t *, uint64_t); -void atomic_add_64(volatile uint64_t *, uint64_t); -void atomic_subtract_64(volatile uint64_t *, uint64_t); - -#else /* !__GNUCLIKE_ASM */ - /* * Always use lock prefixes. The result is slighly less optimal for * UP systems, but it matters less now, and sometimes UP is emulated @@ -622,8 +586,6 @@ atomic_subtract_64(volatile uint64_t *p, uint64_t v) #endif /* _KERNEL */ -#endif /* !__GNUCLIKE_ASM */ - ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v); ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v); ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v); @@ -698,8 +660,6 @@ atomic_testandclear_long(volatile u_long *p, u_int v) } /* Read the current value and store a new value in the destination. 
*/ -#ifdef __GNUCLIKE_ASM - static __inline u_int atomic_swap_int(volatile u_int *p, u_int v) { @@ -719,13 +679,6 @@ atomic_swap_long(volatile u_long *p, u_long v) return (atomic_swap_int((volatile u_int *)p, (u_int)v)); } -#else /* !__GNUCLIKE_ASM */ - -u_int atomic_swap_int(volatile u_int *p, u_int v); -u_long atomic_swap_long(volatile u_long *p, u_long v); - -#endif /* __GNUCLIKE_ASM */ - #define atomic_set_acq_char atomic_set_barr_char #define atomic_set_rel_char atomic_set_barr_char #define atomic_clear_acq_char atomic_clear_barr_char diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h index 79cdd3004b77..59ee9331cf9f 100644 --- a/sys/i386/include/cpufunc.h +++ b/sys/i386/include/cpufunc.h @@ -54,7 +54,7 @@ struct region_descriptor; #define writew(va, d) (*(volatile uint16_t *) (va) = (d)) #define writel(va, d) (*(volatile uint32_t *) (va) = (d)) -#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE) +#if defined(__CC_SUPPORTS___INLINE) static __inline void breakpoint(void) @@ -774,7 +774,7 @@ wrpkru(uint32_t mask) __asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0)); } -#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */ +#else /* !__CC_SUPPORTS___INLINE */ int breakpoint(void); u_int bsfl(u_int mask); @@ -844,7 +844,7 @@ void write_cyrix_reg(u_char reg, u_char data); void write_eflags(u_int ef); void wrmsr(u_int msr, uint64_t newval); -#endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */ +#endif /* __CC_SUPPORTS___INLINE */ void reset_dbregs(void); diff --git a/sys/i386/include/ieeefp.h b/sys/i386/include/ieeefp.h index 1d92014e9209..133c40cb271e 100644 --- a/sys/i386/include/ieeefp.h +++ b/sys/i386/include/ieeefp.h @@ -44,8 +44,6 @@ #include <x86/x86_ieeefp.h> -#ifdef __GNUCLIKE_ASM - static __inline fp_rnd_t fpgetround(void) { @@ -156,6 +154,4 @@ fpresetsticky(fp_except_t _m) return (_p); } -#endif /* __GNUCLIKE_ASM */ - #endif /* !_MACHINE_IEEEFP_H_ */ diff --git a/sys/i386/include/in_cksum.h b/sys/i386/include/in_cksum.h index 84e369cf3c81..ac6049719364 100644 --- a/sys/i386/include/in_cksum.h +++ b/sys/i386/include/in_cksum.h @@ -47,7 +47,6 @@ * in the normal case (where there are no options and the header length is * therefore always exactly five 32-bit words. */ -#if defined(__GNUCLIKE_ASM) #if defined(IPVERSION) && (IPVERSION == 4) static __inline u_int in_cksum_hdr(const struct ip *ip) @@ -107,19 +106,11 @@ in_pseudo(u_int sum, u_int b, u_int c) sum -= 0xffff; return (sum); } -#endif #ifdef _KERNEL #define HAVE_MD_IN_CKSUM -#if !defined(__GNUCLIKE_ASM) -#if defined(IPVERSION) && (IPVERSION == 4) -u_int in_cksum_hdr(const struct ip *ip); -#endif -u_short in_addword(u_short sum, u_short b); -u_short in_pseudo(u_int sum, u_int b, u_int c); -#endif u_short in_cksum_skip(struct mbuf *m, int len, int skip); #endif /* _KERNEL */ diff --git a/sys/i386/include/pcpu.h b/sys/i386/include/pcpu.h index a4c7968ea85f..c4099f04ded1 100644 --- a/sys/i386/include/pcpu.h +++ b/sys/i386/include/pcpu.h @@ -99,7 +99,7 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line"); #define MONITOR_STOPSTATE_RUNNING 0 #define MONITOR_STOPSTATE_STOPPED 1 -#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) +#if defined(__GNUCLIKE___TYPEOF) /* * Evaluates to the byte offset of the per-cpu variable name. 
@@ -206,11 +206,11 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line"); #define IS_BSP() (PCPU_GET(cpuid) == 0) -#else /* defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) */ +#else /* defined(__GNUCLIKE___TYPEOF) */ #error "this file needs to be ported to your compiler" -#endif /* __GNUCLIKE_ASM etc. */ +#endif /* __GNUCLIKE___TYPEOF */ #endif /* _KERNEL */ diff --git a/sys/i386/include/profile.h b/sys/i386/include/profile.h index 8bb3c7af748c..e834abe33c37 100644 --- a/sys/i386/include/profile.h +++ b/sys/i386/include/profile.h @@ -43,7 +43,6 @@ #define _MCOUNT_DECL static __inline void _mcount -#ifdef __GNUCLIKE_ASM #define MCOUNT \ void \ mcount() \ @@ -75,9 +74,6 @@ mcount() \ _mcount(frompc, selfpc); \ __asm("" : : "c" (ecx)); \ } -#else /* !__GNUCLIKE_ASM */ -#define MCOUNT -#endif /* __GNUCLIKE_ASM */ typedef u_int uintfptr_t; @@ -88,9 +84,7 @@ typedef u_int uintfptr_t; typedef u_int fptrdiff_t; __BEGIN_DECLS -#ifdef __GNUCLIKE_ASM void mcount(void) __asm(".mcount"); -#endif __END_DECLS #endif /* !_KERNEL */ diff --git a/sys/x86/include/bus.h b/sys/x86/include/bus.h index 9522e5db7c78..ccd4ba26e387 100644 --- a/sys/x86/include/bus.h +++ b/sys/x86/include/bus.h @@ -103,10 +103,6 @@ #include <machine/cpufunc.h> #include <machine/bus_dma.h> -#ifndef __GNUCLIKE_ASM -#error "no assembler code for your compiler" -#endif - /* * Values for the x86 bus space tag, not to be used directly by MI code. */ @@ -285,7 +281,6 @@ bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh, if (tag == X86_BUS_SPACE_IO) insb(bsh + offset, addr, count); else { -#ifdef __GNUCLIKE_ASM __asm __volatile(" \n\ 1: movb (%2),%%al \n\ stosb \n\ @@ -293,7 +288,6 @@ bus_space_read_multi_1(bus_space_tag_t tag, bus_space_handle_t bsh, "=D" (addr), "=c" (count) : "r" (bsh + offset), "0" (addr), "1" (count) : "%eax", "memory"); -#endif } } @@ -305,7 +299,6 @@ bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh, if (tag == X86_BUS_SPACE_IO) insw(bsh + offset, addr, count); else { -#ifdef __GNUCLIKE_ASM __asm __volatile(" \n\ 1: movw (%2),%%ax \n\ stosw \n\ @@ -313,7 +306,6 @@ bus_space_read_multi_2(bus_space_tag_t tag, bus_space_handle_t bsh, "=D" (addr), "=c" (count) : "r" (bsh + offset), "0" (addr), "1" (count) : "%eax", "memory"); -#endif } } @@ -325,7 +317,6 @@ bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh, if (tag == X86_BUS_SPACE_IO) insl(bsh + offset, addr, count); else { -#ifdef __GNUCLIKE_ASM __asm __volatile(" \n\ 1: movl (%2),%%eax \n\ stosl \n\ @@ -333,7 +324,6 @@ bus_space_read_multi_4(bus_space_tag_t tag, bus_space_handle_t bsh, "=D" (addr), "=c" (count) : "r" (bsh + offset), "0" (addr), "1" (count) : "%eax", "memory"); -#endif } } @@ -368,7 +358,6 @@ bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh, if (tag == X86_BUS_SPACE_IO) { int _port_ = bsh + offset; -#ifdef __GNUCLIKE_ASM __asm __volatile(" \n\ 1: inb %w2,%%al \n\ stosb \n\ @@ -377,17 +366,14 @@ bus_space_read_region_1(bus_space_tag_t tag, bus_space_handle_t bsh, "=D" (addr), "=c" (count), "=d" (_port_) : "0" (addr), "1" (count), "2" (_port_) : "%eax", "memory", "cc"); -#endif } else { bus_space_handle_t _port_ = bsh + offset; -#ifdef __GNUCLIKE_ASM __asm __volatile(" \n\ repne \n\ movsb" : "=D" (addr), "=c" (count), "=S" (_port_) : "0" (addr), "1" (count), "2" (_port_) : "memory", "cc"); -#endif *** 252 LINES SKIPPED ***
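
For readers unfamiliar with the guard being removed: the commit message describes #ifdef __GNUCLIKE_ASM blocks whose #else branches, when they existed at all, supplied only prototypes. The following is a minimal, hypothetical sketch of that shape; the helper name set_fpu_cw() is illustrative and not taken from the patch, and it assumes an x86 target with a GCC/clang-style compiler (in FreeBSD the macro itself comes from <sys/cdefs.h>).

#include <stdint.h>

#ifdef __GNUCLIKE_ASM
/* GCC/clang path: emit the instruction inline. */
static inline void
set_fpu_cw(uint16_t cw)
{
	__asm__ __volatile__("fldcw %0" : : "m" (cw));
}
#else
/*
 * Fallback path: only a prototype, which nothing ever defined, and
 * many files omitted the #else entirely, so a non-GNU-like compiler
 * would silently build code that does nothing -- the breakage the
 * commit message cites.  After this change the inline-asm version
 * above is compiled unconditionally, since every supported compiler
 * accepts it.
 */
void set_fpu_cw(uint16_t cw);
#endif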