Module Name: src Committed By: dsl Date: Mon May 7 20:51:20 UTC 2012
Modified Files: src/sys/arch/amd64/include: frameasm.h Log Message: Move all the XEN differences to a single conditional. Merge the XEN/non-XEN versions of INTRFASTEXIT and INTR_RECURSE_HWFRAME by using extra defines. Split INTRENTRY so that code can insert extra instructions inside the user/kernel conditional. To generate a diff of this commit: cvs rdiff -u -r1.16 -r1.17 src/sys/arch/amd64/include/frameasm.h Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/amd64/include/frameasm.h diff -u src/sys/arch/amd64/include/frameasm.h:1.16 src/sys/arch/amd64/include/frameasm.h:1.17 --- src/sys/arch/amd64/include/frameasm.h:1.16 Wed Aug 10 06:33:13 2011 +++ src/sys/arch/amd64/include/frameasm.h Mon May 7 20:51:20 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: frameasm.h,v 1.16 2011/08/10 06:33:13 cherry Exp $ */ +/* $NetBSD: frameasm.h,v 1.17 2012/05/07 20:51:20 dsl Exp $ */ #ifndef _AMD64_MACHINE_FRAMEASM_H #define _AMD64_MACHINE_FRAMEASM_H @@ -17,7 +17,23 @@ /* Xen do not need swapgs, done by hypervisor */ #define swapgs #define iretq pushq $0 ; jmp HYPERVISOR_iret -#endif +#define XEN_ONLY2(x,y) x,y +#define NOT_XEN(x) + +#define CLI(temp_reg) \ + movq CPUVAR(VCPU),%r ## temp_reg ; \ + movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg); + +#define STI(temp_reg) \ + movq CPUVAR(VCPU),%r ## temp_reg ; \ + movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg); + +#else /* XEN */ +#define XEN_ONLY2(x,y) +#define NOT_XEN(x) x +#define CLI(temp_reg) cli +#define STI(temp_reg) sti +#endif /* XEN */ /* * These are used on interrupt or trap entry or exit. 
@@ -57,23 +73,27 @@ movq TF_RBX(%rsp),%rbx ; \ movq TF_RAX(%rsp),%rax -#define INTRENTRY \ +#define INTRENTRY_L(kernel_trap) \ subq $TF_REGSIZE,%rsp ; \ - testq $SEL_UPL,TF_CS(%rsp) ; \ - je 98f ; \ + INTR_SAVE_GPRS ; \ + testb $SEL_UPL,TF_CS(%rsp) ; \ + je kernel_trap ; \ swapgs ; \ movw %gs,TF_GS(%rsp) ; \ movw %fs,TF_FS(%rsp) ; \ movw %es,TF_ES(%rsp) ; \ - movw %ds,TF_DS(%rsp) ; \ -98: INTR_SAVE_GPRS + movw %ds,TF_DS(%rsp) + +#define INTRENTRY \ + INTRENTRY_L(98f) ; \ +98: -#ifndef XEN #define INTRFASTEXIT \ INTR_RESTORE_GPRS ; \ testq $SEL_UPL,TF_CS(%rsp) /* Interrupted %cs */ ; \ je 99f ; \ - cli ; \ +/* XEN: Disabling events before going to user mode sounds like a BAD idea */ \ + NOT_XEN(cli;) \ movw TF_ES(%rsp),%es ; \ movw TF_DS(%rsp),%ds ; \ swapgs ; \ @@ -88,41 +108,15 @@ pushfq ; \ movl %cs,%r11d ; \ pushq %r11 ; \ +/* XEN: We must fixup CS, as even kernel mode runs at CPL 3 */ \ + XEN_ONLY2(andb $0xfc,(%rsp)) \ pushq %r13 ; -#else /* !XEN */ -/* - * Disabling events before going to user mode sounds like a BAD idea - * do no restore gs either, HYPERVISOR_iret will do a swapgs - */ -#define INTRFASTEXIT \ - INTR_RESTORE_GPRS ; \ - testq $SEL_UPL,TF_CS(%rsp) ; \ - je 99f ; \ - movw TF_ES(%rsp),%es ; \ - movw TF_DS(%rsp),%ds ; \ -99: addq $TF_REGSIZE+16,%rsp /* + T_xxx and error code */ ; \ - iretq - -/* We must fixup CS, as even kernel mode runs at CPL 3 */ -#define INTR_RECURSE_HWFRAME \ - movq %rsp,%r10 ; \ - movl %ss,%r11d ; \ - pushq %r11 ; \ - pushq %r10 ; \ - pushfq ; \ - movl %cs,%r11d ; \ - pushq %r11 ; \ - andb $0xfc,(%rsp) ; \ - pushq %r13 ; - -#endif /* !XEN */ - #define DO_DEFERRED_SWITCH \ cmpl $0, CPUVAR(WANT_PMAPLOAD) ; \ jz 1f ; \ call _C_LABEL(do_pmap_load) ; \ - 1: +1: #define CHECK_DEFERRED_SWITCH \ cmpl $0, CPUVAR(WANT_PMAPLOAD) @@ -130,18 +124,4 @@ #define CHECK_ASTPENDING(reg) cmpl $0, L_MD_ASTPENDING(reg) #define CLEAR_ASTPENDING(reg) movl $0, L_MD_ASTPENDING(reg) -#ifdef XEN -#define CLI(temp_reg) \ - movq CPUVAR(VCPU),%r ## 
temp_reg ; \ - movb $1,EVTCHN_UPCALL_MASK(%r ## temp_reg); - -#define STI(temp_reg) \ - movq CPUVAR(VCPU),%r ## temp_reg ; \ - movb $0,EVTCHN_UPCALL_MASK(%r ## temp_reg); - -#else /* XEN */ -#define CLI(temp_reg) cli -#define STI(temp_reg) sti -#endif /* XEN */ - #endif /* _AMD64_MACHINE_FRAMEASM_H */