Module Name:    src
Committed By:   maxv
Date:           Wed May  1 15:17:49 UTC 2019

Modified Files:
        src/sys/arch/amd64/amd64: cpufunc.S
        src/sys/arch/i386/i386: cpufunc.S
        src/sys/arch/x86/include: cpufunc.h

Log Message:
Start converting the x86 CPU functions to inlined ASM. This matters for NVMM,
where some of these functions are invoked millions of times.
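
As an illustrative sketch (not part of the commit; the helper name and the
<machine/cpufunc.h> include path are assumptions), the conversion turns each
out-of-line assembly stub into a static inline wrapper around the bare
instruction, so a hypothetical NVMM-style hot path such as the loop below no
longer pays a call/ret round trip per invocation:

	#include <sys/types.h>
	#include <machine/cpufunc.h>	/* now provides the inline rdmsr() */

	/*
	 * Hypothetical hot path: with rdmsr() inlined, the compiler
	 * emits the rdmsr instruction directly at each call site
	 * instead of a call into cpufunc.S.
	 */
	static uint64_t
	sum_msr_reads(u_int msr, int n)
	{
		uint64_t sum = 0;
		int i;

		for (i = 0; i < n; i++)
			sum += rdmsr(msr);
		return sum;
	}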


To generate a diff of this commit:
cvs rdiff -u -r1.37 -r1.38 src/sys/arch/amd64/amd64/cpufunc.S
cvs rdiff -u -r1.29 -r1.30 src/sys/arch/i386/i386/cpufunc.S
cvs rdiff -u -r1.25 -r1.26 src/sys/arch/x86/include/cpufunc.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.37 src/sys/arch/amd64/amd64/cpufunc.S:1.38
--- src/sys/arch/amd64/amd64/cpufunc.S:1.37	Wed May  1 14:29:15 2019
+++ src/sys/arch/amd64/amd64/cpufunc.S	Wed May  1 15:17:49 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.37 2019/05/01 14:29:15 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.38 2019/05/01 15:17:49 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -264,24 +264,6 @@ END(x86_write_flags)
 STRONG_ALIAS(x86_write_psl,x86_write_flags)
 #endif /* XENPV */
 
-ENTRY(rdmsr)
-	movq	%rdi, %rcx
-	xorq	%rax, %rax
-	rdmsr
-	shlq	$32, %rdx
-	orq	%rdx, %rax
-	ret
-END(rdmsr)
-
-ENTRY(wrmsr)
-	movq	%rdi, %rcx
-	movq	%rsi, %rax
-	movq	%rsi, %rdx
-	shrq	$32, %rdx
-	wrmsr
-	ret
-END(wrmsr)
-
 ENTRY(rdmsr_locked)
 	movq	%rdi, %rcx
 	xorq	%rax, %rax
@@ -324,23 +306,6 @@ ENTRY(rdmsr_safe)
 	ret
 END(rdmsr_safe)
 
-ENTRY(rdxcr)
-	movq	%rdi, %rcx
-	xgetbv
-	shlq	$32, %rdx
-	orq	%rdx, %rax
-	ret
-END(rdxcr)
-
-ENTRY(wrxcr)
-	movq	%rdi, %rcx
-	movq	%rsi, %rax
-	movq	%rsi, %rdx
-	shrq	$32, %rdx
-	xsetbv
-	ret
-END(wrxcr)
-
 /*
  * MSR operations fault handler
  */
@@ -374,14 +339,6 @@ ENTRY(cpu_counter32)
 	ret
 END(cpu_counter32)
 
-ENTRY(rdtsc)
-	xorq	%rax,%rax
-	rdtsc
-	shlq	$32,%rdx
-	orq	%rdx,%rax
-	ret
-END(rdtsc)
-
 ENTRY(breakpoint)
 	pushq	%rbp
 	movq	%rsp, %rbp
@@ -498,11 +455,6 @@ ENTRY(x86_mwait)
 	ret
 END(x86_mwait)
 
-ENTRY(x86_pause)
-	pause
-	ret
-END(x86_pause)
-
 ENTRY(x86_cpuid2)
 	movq	%rbx, %r8
 	movq	%rdi, %rax

Index: src/sys/arch/i386/i386/cpufunc.S
diff -u src/sys/arch/i386/i386/cpufunc.S:1.29 src/sys/arch/i386/i386/cpufunc.S:1.30
--- src/sys/arch/i386/i386/cpufunc.S:1.29	Wed May  1 14:29:15 2019
+++ src/sys/arch/i386/i386/cpufunc.S	Wed May  1 15:17:49 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.29 2019/05/01 14:29:15 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.30 2019/05/01 15:17:49 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
 #include <sys/errno.h>
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.29 2019/05/01 14:29:15 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.30 2019/05/01 15:17:49 maxv Exp $");
 
 #include "opt_xen.h"
 
@@ -107,20 +107,6 @@ STRONG_ALIAS(x86_write_psl,x86_write_fla
 STRONG_ALIAS(x86_read_psl,x86_read_flags)
 #endif	/* XENPV */
 
-ENTRY(rdmsr)
-	movl	4(%esp), %ecx
-	rdmsr
-	ret
-END(rdmsr)
-
-ENTRY(wrmsr)
-	movl	4(%esp), %ecx
-	movl	8(%esp), %eax
-	movl	12(%esp), %edx
-	wrmsr
-	ret
-END(wrmsr)
-
 ENTRY(rdmsr_locked)
 	movl	4(%esp), %ecx
 	pushl	%edi
@@ -164,23 +150,6 @@ ENTRY(rdmsr_safe)
 	ret
 END(rdmsr_safe)
 
-/* uint64_t rdxcr(uint32_t) */
-ENTRY(rdxcr)
-	movl	4(%esp), %ecx	/* extended control reg number */
-	xgetbv			/* Read to %edx:%eax */
-	ret
-END(rdxcr)
-
-/* void wrxcr(uint32_t, uint64_t) */
-ENTRY(wrxcr)
-	movl	4(%esp), %ecx	/* extended control reg number */
-	movl	8(%esp), %eax	/* feature mask bits */
-	movl	12(%esp), %edx
-	xsetbv
-	ret
-END(wrxcr)
-	
-
 /*
  * MSR operations fault handler
  */
@@ -205,11 +174,6 @@ ENTRY(cpu_counter32)
 	ret
 END(cpu_counter32)
 
-ENTRY(rdtsc)
-	rdtsc
-	ret
-END(rdtsc)
-
 ENTRY(breakpoint)
 	pushl	%ebp
 	movl	%esp, %ebp
@@ -296,11 +260,6 @@ ENTRY(x86_mwait)  
 	ret
 END(x86_mwait)  
 
-ENTRY(x86_pause)
-	pause
-	ret
-END(x86_pause)
-
 ENTRY(x86_cpuid2)
 	pushl	%ebx
 	pushl	%edi

Index: src/sys/arch/x86/include/cpufunc.h
diff -u src/sys/arch/x86/include/cpufunc.h:1.25 src/sys/arch/x86/include/cpufunc.h:1.26
--- src/sys/arch/x86/include/cpufunc.h:1.25	Wed May  1 14:29:15 2019
+++ src/sys/arch/x86/include/cpufunc.h	Wed May  1 15:17:49 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.h,v 1.25 2019/05/01 14:29:15 maxv Exp $	*/
+/*	$NetBSD: cpufunc.h,v 1.26 2019/05/01 15:17:49 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -44,7 +44,12 @@
 
 #ifdef _KERNEL
 
-void	x86_pause(void);
+static inline void
+x86_pause(void)
+{
+	asm volatile ("pause");
+}
+
 void	x86_lfence(void);
 void	x86_sfence(void);
 void	x86_mfence(void);
@@ -56,7 +61,21 @@ void	tlbflushg(void);
 void	invlpg(vaddr_t);
 void	wbinvd(void);
 void	breakpoint(void);
-uint64_t rdtsc(void);
+
+static inline uint64_t
+rdtsc(void)
+{
+	uint32_t low, high;
+
+	asm volatile (
+		"rdtsc"
+		: "=a" (low), "=d" (high)
+		:
+	);
+
+	return (low | ((uint64_t)high << 32));
+}
+
 #ifndef XEN
 void	x86_hotpatch(uint32_t, const uint8_t *, size_t);
 void	x86_patch_window_open(u_long *, u_long *);
@@ -125,8 +144,33 @@ void	x86_ldmxcsr(const uint32_t *);
 void	x86_stmxcsr(uint32_t *);
 void	fldummy(void);
 
-uint64_t rdxcr(uint32_t);
-void	wrxcr(uint32_t, uint64_t);
+static inline uint64_t
+rdxcr(uint32_t xcr)
+{
+	uint32_t low, high;
+
+	asm volatile (
+		"xgetbv"
+		: "=a" (low), "=d" (high)
+		: "c" (xcr)
+	);
+
+	return (low | ((uint64_t)high << 32));
+}
+
+static inline void
+wrxcr(uint32_t xcr, uint64_t val)
+{
+	uint32_t low, high;
+
+	low = val;
+	high = val >> 32;
+	asm volatile (
+		"xsetbv"
+		:
+		: "a" (low), "d" (high), "c" (xcr)
+	);
+}
 
 void	xrstor(const union savefpu *, uint64_t);
 void	xsave(union savefpu *, uint64_t);
@@ -156,10 +200,37 @@ void	x86_reset(void);
 
 #define	OPTERON_MSR_PASSCODE	0x9c5a203aU
 
-uint64_t	rdmsr(u_int);
+static inline uint64_t
+rdmsr(u_int msr)
+{
+	uint32_t low, high;
+
+	asm volatile (
+		"rdmsr"
+		: "=a" (low), "=d" (high)
+		: "c" (msr)
+	);
+
+	return (low | ((uint64_t)high << 32));
+}
+
 uint64_t	rdmsr_locked(u_int);
 int		rdmsr_safe(u_int, uint64_t *);
-void		wrmsr(u_int, uint64_t);
+
+static inline void
+wrmsr(u_int msr, uint64_t val)
+{
+	uint32_t low, high;
+
+	low = val;
+	high = val >> 32;
+	asm volatile (
+		"wrmsr"
+		:
+		: "a" (low), "d" (high), "c" (msr)
+	);
+}
+
 void		wrmsr_locked(u_int, uint64_t);
 
 #endif /* _KERNEL */
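
As a usage sketch (the helper is hypothetical, not from the commit), the new
inlines compose with no function-call overhead; the "c", "a" and "d" asm
constraints bind %ecx, %eax and %edx exactly as the removed assembly stubs did:

	/*
	 * Hypothetical helper built on the new inline rdmsr()/wrmsr():
	 * a read-modify-write of an MSR compiles down to rdmsr, or,
	 * wrmsr with no intervening call/ret.
	 */
	static inline void
	msr_set_bits(u_int msr, uint64_t bits)
	{
		wrmsr(msr, rdmsr(msr) | bits);
	}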
