Module Name: src Committed By: bouyer Date: Sun Apr 12 17:25:53 UTC 2020
Modified Files: src/sys/arch/amd64/amd64 [bouyer-xenpvh]: genassym.cf lock_stubs.S spl.S vector.S src/sys/arch/i386/i386 [bouyer-xenpvh]: genassym.cf i386_trap.S locore.S spl.S vector.S src/sys/arch/x86/include [bouyer-xenpvh]: cpu.h intrdefs.h src/sys/arch/x86/isa [bouyer-xenpvh]: isa_machdep.c src/sys/arch/x86/x86 [bouyer-xenpvh]: i8259.c intr.c src/sys/arch/xen/include [bouyer-xenpvh]: hypervisor.h intr.h src/sys/arch/xen/x86 [bouyer-xenpvh]: hypervisor_machdep.c xen_intr.c src/sys/arch/xen/xen [bouyer-xenpvh]: clock.c evtchn.c xenevt.c Log Message: Get rid of xen-specific ci_x* interrupt handling: - use the general SIR mechanism, reserving 3 more slots for IPL_VM, IPL_SCHED and IPL_HIGH - remove specific handling from C sources, or change to ipending - convert IPL number to SIR number in various places - Remove XUNMASK/XPENDING in assembly or change to IUNMASK/IPENDING - remove Xen-specific ci_xsources, ci_xmask, ci_xunmask, ci_xpending from struct cpu_info - for now remove a KASSERT that there are no pending interrupts in idle_block(). We can get there with some software interrupts pending in autoconf. XXX needs to be looked at. 
To generate a diff of this commit: cvs rdiff -u -r1.82.4.2 -r1.82.4.3 src/sys/arch/amd64/amd64/genassym.cf cvs rdiff -u -r1.35.6.1 -r1.35.6.2 src/sys/arch/amd64/amd64/lock_stubs.S cvs rdiff -u -r1.43.4.5 -r1.43.4.6 src/sys/arch/amd64/amd64/spl.S cvs rdiff -u -r1.73.6.2 -r1.73.6.3 src/sys/arch/amd64/amd64/vector.S cvs rdiff -u -r1.119.4.2 -r1.119.4.3 src/sys/arch/i386/i386/genassym.cf cvs rdiff -u -r1.20 -r1.20.6.1 src/sys/arch/i386/i386/i386_trap.S cvs rdiff -u -r1.179.2.1 -r1.179.2.2 src/sys/arch/i386/i386/locore.S cvs rdiff -u -r1.50.4.4 -r1.50.4.5 src/sys/arch/i386/i386/spl.S cvs rdiff -u -r1.85.6.3 -r1.85.6.4 src/sys/arch/i386/i386/vector.S cvs rdiff -u -r1.117.4.3 -r1.117.4.4 src/sys/arch/x86/include/cpu.h cvs rdiff -u -r1.23 -r1.23.6.1 src/sys/arch/x86/include/intrdefs.h cvs rdiff -u -r1.44 -r1.44.10.1 src/sys/arch/x86/isa/isa_machdep.c cvs rdiff -u -r1.23 -r1.23.10.1 src/sys/arch/x86/x86/i8259.c cvs rdiff -u -r1.150.6.1 -r1.150.6.2 src/sys/arch/x86/x86/intr.c cvs rdiff -u -r1.49 -r1.49.10.1 src/sys/arch/xen/include/hypervisor.h cvs rdiff -u -r1.53 -r1.53.6.1 src/sys/arch/xen/include/intr.h cvs rdiff -u -r1.36 -r1.36.8.1 src/sys/arch/xen/x86/hypervisor_machdep.c cvs rdiff -u -r1.21.2.1 -r1.21.2.2 src/sys/arch/xen/x86/xen_intr.c cvs rdiff -u -r1.80.6.1 -r1.80.6.2 src/sys/arch/xen/xen/clock.c cvs rdiff -u -r1.88.2.2 -r1.88.2.3 src/sys/arch/xen/xen/evtchn.c cvs rdiff -u -r1.56 -r1.56.2.1 src/sys/arch/xen/xen/xenevt.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/amd64/amd64/genassym.cf diff -u src/sys/arch/amd64/amd64/genassym.cf:1.82.4.2 src/sys/arch/amd64/amd64/genassym.cf:1.82.4.3 --- src/sys/arch/amd64/amd64/genassym.cf:1.82.4.2 Sat Apr 11 10:11:30 2020 +++ src/sys/arch/amd64/amd64/genassym.cf Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -# $NetBSD: genassym.cf,v 1.82.4.2 2020/04/11 10:11:30 bouyer Exp $ +# $NetBSD: genassym.cf,v 1.82.4.3 2020/04/12 17:25:52 bouyer Exp $ # # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc. @@ -323,6 +323,8 @@ define IPL_NONE IPL_NONE define IPL_PREEMPT IPL_PREEMPT define IPL_NET IPL_NET define IPL_CLOCK IPL_CLOCK +define IPL_VM IPL_VM +define IPL_SCHED IPL_SCHED define IPL_HIGH IPL_HIGH define LIR_IPI LIR_IPI @@ -362,10 +364,9 @@ define VM_GUEST_XENPV VM_GUEST_XENPV ifdef XEN define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu) -define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending) -define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask) -define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask) -define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources) +define SIR_XENIPL_VM SIR_XENIPL_VM +define SIR_XENIPL_SCHED SIR_XENIPL_SCHED +define SIR_XENIPL_HIGH SIR_XENIPL_HIGH define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask) ifdef XENPV define XEN_PT_BASE offsetof(struct start_info, pt_base) Index: src/sys/arch/amd64/amd64/lock_stubs.S diff -u src/sys/arch/amd64/amd64/lock_stubs.S:1.35.6.1 src/sys/arch/amd64/amd64/lock_stubs.S:1.35.6.2 --- src/sys/arch/amd64/amd64/lock_stubs.S:1.35.6.1 Sat Apr 11 18:26:06 2020 +++ src/sys/arch/amd64/amd64/lock_stubs.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: lock_stubs.S,v 1.35.6.1 2020/04/11 18:26:06 bouyer Exp $ */ +/* $NetBSD: lock_stubs.S,v 1.35.6.2 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 
@@ -130,11 +130,6 @@ ENTRY(mutex_spin_exit) CLI(ax) testl CPU_INFO_IPENDING(%r8), %esi jnz _C_LABEL(Xspllower) -#if defined(XEN) - movl CPU_INFO_XUNMASK(%r8,%rdi,4), %esi - testl CPU_INFO_XPENDING(%r8), %esi - jnz _C_LABEL(Xspllower) -#endif movl %edi, CPU_INFO_ILEVEL(%r8) STI(ax) 1: rep /* double byte ret as branch */ @@ -158,14 +153,6 @@ ENTRY(mutex_spin_exit) movl %eax,%ebx cmpxchg8b CPU_INFO_ISTATE(%rsi) /* swap in new ilevel */ jnz 4f -#if defined(XEN) - movl CPU_INFO_XPENDING(%rsi),%eax - testl %eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */ - jnz 3f - movl %edx, %eax - cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi) - jnz 4f -#endif 2: popq %rbx ret Index: src/sys/arch/amd64/amd64/spl.S diff -u src/sys/arch/amd64/amd64/spl.S:1.43.4.5 src/sys/arch/amd64/amd64/spl.S:1.43.4.6 --- src/sys/arch/amd64/amd64/spl.S:1.43.4.5 Sat Apr 11 18:26:06 2020 +++ src/sys/arch/amd64/amd64/spl.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: spl.S,v 1.43.4.5 2020/04/11 18:26:06 bouyer Exp $ */ +/* $NetBSD: spl.S,v 1.43.4.6 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 2003 Wasabi Systems, Inc. @@ -239,11 +239,6 @@ ENTRY(spllower) CLI(ax) testl CPUVAR(IPENDING),%edx jnz 2f -#if defined(XEN) - movl CPUVAR(XUNMASK)(,%rdi,4),%edx - testl CPUVAR(XPENDING),%edx - jnz 2f -#endif movl %edi,CPUVAR(ILEVEL) POPF /* clobbers %rdi */ 1: @@ -344,18 +339,6 @@ IDTVEC(spllower) movq CPUVAR(ISOURCES)(,%rax,8),%rax jmp *IS_RECURSE(%rax) 2: -#if defined(XEN) - movl %ebx,%eax /* get cpl */ - movl CPUVAR(XUNMASK)(,%rax,4),%eax - CLI(si) - andl CPUVAR(XPENDING),%eax /* any non-masked bits left? 
*/ - jz 3f - bsrl %eax,%eax - btrl %eax,CPUVAR(XPENDING) - movq CPUVAR(XSOURCES)(,%rax,8),%rax - jmp *IS_RECURSE(%rax) -#endif -3: movl %ebx,CPUVAR(ILEVEL) STI(si) popq %r12 @@ -387,19 +370,7 @@ IDTVEC(doreti) btrl %eax,CPUVAR(IPENDING) movq CPUVAR(ISOURCES)(,%rax,8),%rax jmp *IS_RESUME(%rax) -2: -#if defined(XEN) - movl %ebx,%eax - movl CPUVAR(XUNMASK)(,%rax,4),%eax - CLI(si) - andl CPUVAR(XPENDING),%eax - jz 3f - bsrl %eax,%eax /* slow, but not worth optimizing */ - btrl %eax,CPUVAR(XPENDING) - movq CPUVAR(XSOURCES)(,%rax,8),%rax - jmp *IS_RESUME(%rax) -#endif -3: /* Check for ASTs on exit to user mode. */ +2: /* Check for ASTs on exit to user mode. */ movl %ebx,CPUVAR(ILEVEL) 5: testb $SEL_RPL,TF_CS(%rsp) Index: src/sys/arch/amd64/amd64/vector.S diff -u src/sys/arch/amd64/amd64/vector.S:1.73.6.2 src/sys/arch/amd64/amd64/vector.S:1.73.6.3 --- src/sys/arch/amd64/amd64/vector.S:1.73.6.2 Sat Apr 11 11:56:51 2020 +++ src/sys/arch/amd64/amd64/vector.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: vector.S,v 1.73.6.2 2020/04/11 11:56:51 bouyer Exp $ */ +/* $NetBSD: vector.S,v 1.73.6.3 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 
@@ -693,19 +693,19 @@ END(name ## _stubs) #if defined(XEN) /* Resume/recurse procedures for spl() */ -#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \ -IDTVEC(recurse_ ## name ## num) ;\ +#define XENINTRSTUB(name, sir, level, unmask) \ +IDTVEC(recurse_ ## name ## sir) ;\ INTR_RECURSE_HWFRAME ;\ subq $8,%rsp ;\ pushq $T_ASTFLT /* trap # for doing ASTs */ ;\ INTR_RECURSE_ENTRY ;\ -IDTVEC(resume_ ## name ## num) \ +IDTVEC(resume_ ## name ## sir) \ movq $IREENT_MAGIC,TF_ERR(%rsp) ;\ movl %ebx,%r13d ;\ - movq CPUVAR(XSOURCES) + (num) * 8,%r14 ;\ + movq CPUVAR(ISOURCES) + (sir) * 8,%r14 ;\ 1: \ pushq %r13 ;\ - movl $num,CPUVAR(ILEVEL) ;\ + movl $level,CPUVAR(ILEVEL) ;\ STI(si) ;\ incl CPUVAR(IDEPTH) ;\ movq IS_HANDLERS(%r14),%rbx ;\ @@ -718,48 +718,18 @@ IDTVEC(resume_ ## name ## num) \ jnz 6b ;\ 5: \ CLI(si) ;\ - unmask(num) /* unmask it in hardware */ ;\ - late_ack(num) ;\ + unmask(sir) /* unmask it in hardware */ ;\ STI(si) ;\ jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\ /* The unmask func for Xen events */ -#define hypervisor_asm_unmask(num) \ - movq $num,%rdi ;\ - call _C_LABEL(hypervisor_enable_ipl) - -XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,1,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,2,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,3,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,4,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,5,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,6,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,7,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,8,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,9,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,10,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) 
-XENINTRSTUB(xenev,11,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,12,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,13,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,14,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,15,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,16,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,17,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,18,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,19,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,20,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,21,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,22,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,23,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,24,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,25,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,26,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,27,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,28,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,29,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,30,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,31,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) +#define hypervisor_asm_unmask(sir) \ + movq $sir,%rdi ;\ + call _C_LABEL(hypervisor_enable_sir) + +XENINTRSTUB(xenev,SIR_XENIPL_VM,IPL_VM,hypervisor_asm_unmask) +XENINTRSTUB(xenev,SIR_XENIPL_SCHED,IPL_SCHED,hypervisor_asm_unmask) +XENINTRSTUB(xenev,SIR_XENIPL_HIGH,IPL_HIGH,hypervisor_asm_unmask) /* On Xen, the xenev_stubs are purely for spl entry, since there is no * vector based mechanism. 
We however provide the entrypoint to ensure @@ -771,39 +741,14 @@ LABEL(entry_xenev) callq _C_LABEL(panic) END(entry_xenev) +#define XENINTRSTUB_ENTRY(name, sir) \ + .quad entry_xenev , _C_LABEL(Xrecurse_ ## name ## sir); \ + .quad _C_LABEL(Xresume_ ## name ## sir); + LABEL(xenev_stubs) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev0), _C_LABEL(Xresume_xenev0) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev1) ,_C_LABEL(Xresume_xenev1) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev2) ,_C_LABEL(Xresume_xenev2) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev3) ,_C_LABEL(Xresume_xenev3) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev4) ,_C_LABEL(Xresume_xenev4) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev5) ,_C_LABEL(Xresume_xenev5) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev6) ,_C_LABEL(Xresume_xenev6) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev7) ,_C_LABEL(Xresume_xenev7) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev8) ,_C_LABEL(Xresume_xenev8) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev9) ,_C_LABEL(Xresume_xenev9) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev10), _C_LABEL(Xresume_xenev10) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev11), _C_LABEL(Xresume_xenev11) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev12), _C_LABEL(Xresume_xenev12) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev13), _C_LABEL(Xresume_xenev13) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev14), _C_LABEL(Xresume_xenev14) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev15), _C_LABEL(Xresume_xenev15) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev16), _C_LABEL(Xresume_xenev16) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev17), _C_LABEL(Xresume_xenev17) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev18), _C_LABEL(Xresume_xenev18) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev19), _C_LABEL(Xresume_xenev19) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev20), _C_LABEL(Xresume_xenev20) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev21), _C_LABEL(Xresume_xenev21) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev22), _C_LABEL(Xresume_xenev22) - 
.quad entry_xenev, _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30) - .quad entry_xenev, _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31) + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_VM) ; + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_SCHED) ; + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_HIGH) ; END(xenev_stubs) /* Index: src/sys/arch/i386/i386/genassym.cf diff -u src/sys/arch/i386/i386/genassym.cf:1.119.4.2 src/sys/arch/i386/i386/genassym.cf:1.119.4.3 --- src/sys/arch/i386/i386/genassym.cf:1.119.4.2 Sat Apr 11 10:11:31 2020 +++ src/sys/arch/i386/i386/genassym.cf Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -# $NetBSD: genassym.cf,v 1.119.4.2 2020/04/11 10:11:31 bouyer Exp $ +# $NetBSD: genassym.cf,v 1.119.4.3 2020/04/12 17:25:52 bouyer Exp $ # # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc. 
@@ -324,6 +324,8 @@ define IPL_PREEMPT IPL_PREEMPT define IPL_NET IPL_NET define IPL_SCHED IPL_SCHED define IPL_CLOCK IPL_CLOCK +define IPL_VM IPL_VM +define IPL_SCHED IPL_SCHED define IPL_HIGH IPL_HIGH define IPL_SOFTNET IPL_SOFTNET @@ -376,10 +378,9 @@ define VM_GUEST_XENPV VM_GUEST_XENPV ifdef XEN define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu) -define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending) -define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask) -define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask) -define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources) +define SIR_XENIPL_VM SIR_XENIPL_VM +define SIR_XENIPL_SCHED SIR_XENIPL_SCHED +define SIR_XENIPL_HIGH SIR_XENIPL_HIGH define START_INFO_SHARED_INFO offsetof(struct start_info, shared_info) define START_INFO_FLAGS offsetof(struct start_info, flags) define START_INFO_CONSOLE_MFN offsetof(struct start_info, console.domU.mfn) Index: src/sys/arch/i386/i386/i386_trap.S diff -u src/sys/arch/i386/i386/i386_trap.S:1.20 src/sys/arch/i386/i386/i386_trap.S:1.20.6.1 --- src/sys/arch/i386/i386/i386_trap.S:1.20 Sat Oct 12 06:31:03 2019 +++ src/sys/arch/i386/i386/i386_trap.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: i386_trap.S,v 1.20 2019/10/12 06:31:03 maxv Exp $ */ +/* $NetBSD: i386_trap.S,v 1.20.6.1 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright 2002 (c) Wasabi Systems, Inc. @@ -66,7 +66,7 @@ #if 0 #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.20 2019/10/12 06:31:03 maxv Exp $"); +__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.20.6.1 2020/04/12 17:25:52 bouyer Exp $"); #endif /* @@ -446,12 +446,12 @@ calltrap: movl $.Lalltraps_resume,%esi /* address to resume loop at */ .Lalltraps_resume: movl %ebx,%eax /* get cpl */ - movl CPUVAR(XUNMASK)(,%eax,4),%eax - andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */ + movl CPUVAR(IUNMASK)(,%eax,4),%eax + andl CPUVAR(IPENDING),%eax /* any non-masked bits left? 
*/ jz 11f bsrl %eax,%eax - btrl %eax,CPUVAR(XPENDING) - movl CPUVAR(XSOURCES)(,%eax,4),%eax + btrl %eax,CPUVAR(IPENDING) + movl CPUVAR(ISOURCES)(,%eax,4),%eax jmp *IS_RESUME(%eax) 11: movl %ebx,CPUVAR(ILEVEL) /* restore cpl */ jmp .Lalltraps_checkusr Index: src/sys/arch/i386/i386/locore.S diff -u src/sys/arch/i386/i386/locore.S:1.179.2.1 src/sys/arch/i386/i386/locore.S:1.179.2.2 --- src/sys/arch/i386/i386/locore.S:1.179.2.1 Wed Apr 8 17:59:16 2020 +++ src/sys/arch/i386/i386/locore.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: locore.S,v 1.179.2.1 2020/04/08 17:59:16 bouyer Exp $ */ +/* $NetBSD: locore.S,v 1.179.2.2 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright-o-rama! @@ -128,7 +128,7 @@ */ #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.179.2.1 2020/04/08 17:59:16 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.179.2.2 2020/04/12 17:25:52 bouyer Exp $"); #include "opt_copy_symtab.h" #include "opt_ddb.h" @@ -1490,12 +1490,12 @@ IDTVEC(syscall) movl $.Lsyscall_resume, %esi /* address to resume loop at */ .Lsyscall_resume: movl %ebx,%eax /* get cpl */ - movl CPUVAR(XUNMASK)(,%eax,4),%eax - andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */ + movl CPUVAR(IUNMASK)(,%eax,4),%eax + andl CPUVAR(IPENDING),%eax /* any non-masked bits left? 
*/ jz 17f bsrl %eax,%eax - btrl %eax,CPUVAR(XPENDING) - movl CPUVAR(XSOURCES)(,%eax,4),%eax + btrl %eax,CPUVAR(IPENDING) + movl CPUVAR(ISOURCES)(,%eax,4),%eax jmp *IS_RESUME(%eax) 17: movl %ebx, CPUVAR(ILEVEL) /* restore cpl */ jmp .Lsyscall_checkast Index: src/sys/arch/i386/i386/spl.S diff -u src/sys/arch/i386/i386/spl.S:1.50.4.4 src/sys/arch/i386/i386/spl.S:1.50.4.5 --- src/sys/arch/i386/i386/spl.S:1.50.4.4 Sat Apr 11 18:26:07 2020 +++ src/sys/arch/i386/i386/spl.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: spl.S,v 1.50.4.4 2020/04/11 18:26:07 bouyer Exp $ */ +/* $NetBSD: spl.S,v 1.50.4.5 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. @@ -30,7 +30,7 @@ */ #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.50.4.4 2020/04/11 18:26:07 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.50.4.5 2020/04/12 17:25:52 bouyer Exp $"); #include "opt_ddb.h" #include "opt_spldebug.h" @@ -92,11 +92,6 @@ ENTRY(spllower) CLI(%eax) testl CPUVAR(IPENDING),%edx jnz 2f -#if defined(XEN) - movl CPUVAR(XUNMASK)(,%ecx,4),%edx - testl CPUVAR(XPENDING),%edx - jnz 2f -#endif movl %ecx,CPUVAR(ILEVEL) POPF(%eax) 1: @@ -222,17 +217,6 @@ IDTVEC(spllower) movl CPUVAR(ISOURCES)(,%eax,4),%eax jmp *IS_RECURSE(%eax) 2: -#if defined(XEN) - movl %ebx,%eax /* get cpl */ - movl CPUVAR(XUNMASK)(,%eax,4),%eax - andl CPUVAR(XPENDING),%eax /* any non-masked bits left? 
*/ - jz 3f - bsrl %eax,%eax - btrl %eax,CPUVAR(XPENDING) - movl CPUVAR(XSOURCES)(,%eax,4),%eax - jmp *IS_RECURSE(%eax) -#endif -3: movl %ebx,CPUVAR(ILEVEL) #ifdef XENPV STIC(%eax) @@ -296,18 +280,7 @@ IDTVEC(doreti) btrl %eax,CPUVAR(IPENDING) movl CPUVAR(ISOURCES)(,%eax, 4),%eax jmp *IS_RESUME(%eax) -2: -#if defined(XEN) - movl %ebx,%eax - movl CPUVAR(XUNMASK)(,%eax,4),%eax - andl CPUVAR(XPENDING),%eax - jz 3f - bsrl %eax,%eax /* slow, but not worth optimizing */ - btrl %eax,CPUVAR(XPENDING) - movl CPUVAR(XSOURCES)(,%eax, 4),%eax - jmp *IS_RESUME(%eax) -#endif -3: /* Check for ASTs on exit to user mode. */ +2: /* Check for ASTs on exit to user mode. */ movl %ebx,CPUVAR(ILEVEL) 5: testb $CHK_UPL,TF_CS(%esp) Index: src/sys/arch/i386/i386/vector.S diff -u src/sys/arch/i386/i386/vector.S:1.85.6.3 src/sys/arch/i386/i386/vector.S:1.85.6.4 --- src/sys/arch/i386/i386/vector.S:1.85.6.3 Sun Apr 12 16:35:49 2020 +++ src/sys/arch/i386/i386/vector.S Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: vector.S,v 1.85.6.3 2020/04/12 16:35:49 bouyer Exp $ */ +/* $NetBSD: vector.S,v 1.85.6.4 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright 2002 (c) Wasabi Systems, Inc. 
@@ -65,7 +65,7 @@ */ #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.85.6.3 2020/04/12 16:35:49 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.85.6.4 2020/04/12 17:25:52 bouyer Exp $"); #include "opt_ddb.h" #include "opt_multiprocessor.h" @@ -940,17 +940,17 @@ END(x2apic_level_stubs) #if defined(XEN) #define voidop(num) -#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \ -IDTVEC(recurse_ ## name ## num) ;\ +#define XENINTRSTUB(name, sir, level, unmask) \ +IDTVEC(recurse_ ## name ## sir) ;\ INTR_RECURSE_HWFRAME ;\ subl $4,%esp ;\ pushl $T_ASTFLT /* trap # for doing ASTs */ ;\ INTRENTRY ;\ -IDTVEC(resume_ ## name ## num) \ +IDTVEC(resume_ ## name ## sir) \ movl $IREENT_MAGIC,TF_ERR(%esp) ;\ pushl %ebx ;\ - movl CPUVAR(XSOURCES) + (num) * 4,%ebp ;\ - movl $num,CPUVAR(ILEVEL) ;\ + movl CPUVAR(ISOURCES) + (sir) * 4,%ebp ;\ + movl $level,CPUVAR(ILEVEL) ;\ IDEPTH_INCR /* leaves old %esp on stack */ ;\ STI(%eax) ;\ movl IS_HANDLERS(%ebp),%ebx ;\ @@ -963,8 +963,7 @@ IDTVEC(resume_ ## name ## num) \ jnz 6b ;\ \ CLI(%eax) ;\ - unmask(num) /* unmask it in hardware */ ;\ - late_ack(num) ;\ + unmask(sir) /* unmask it in hardware */ ;\ jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\ /* @@ -972,43 +971,14 @@ IDTVEC(resume_ ## name ## num) \ * reassert the event pending bit if needed. For now just call * the C function doing it, maybe rewrite in inline assembly ? 
*/ -#define hypervisor_asm_unmask(num) \ - pushl $num ;\ - call _C_LABEL(hypervisor_enable_ipl) ;\ +#define hypervisor_asm_unmask(sir) \ + pushl $sir ;\ + call _C_LABEL(hypervisor_enable_sir) ;\ addl $4,%esp -XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,1,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,2,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,3,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,4,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,5,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,6,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,7,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,8,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,9,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,10,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,11,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,12,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,13,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,14,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,15,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,16,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,17,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,18,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,19,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,20,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,21,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,22,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,23,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) 
-XENINTRSTUB(xenev,24,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,25,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,26,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,27,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,28,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,29,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,30,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) -XENINTRSTUB(xenev,31,voidop,voidop,voidop,hypervisor_asm_unmask,voidop) +XENINTRSTUB(xenev,SIR_XENIPL_VM,IPL_VM,hypervisor_asm_unmask) +XENINTRSTUB(xenev,SIR_XENIPL_SCHED,IPL_SCHED,hypervisor_asm_unmask) +XENINTRSTUB(xenev,SIR_XENIPL_HIGH,IPL_HIGH,hypervisor_asm_unmask) /* On Xen, the xenev_stubs are purely for spl entry, since there is no * vector based mechanism. We however provide the entrypoint to ensure @@ -1020,40 +990,15 @@ LABEL(entry_xenev) call _C_LABEL(panic) END(entry_xenev) +#define XENINTRSTUB_ENTRY(name, sir) \ + .long entry_xenev , _C_LABEL(Xrecurse_ ## name ## sir); \ + .long _C_LABEL(Xresume_ ## name ## sir); + .type _C_LABEL(xenev_stubs), @object LABEL(xenev_stubs) - .long entry_xenev, _C_LABEL(Xrecurse_xenev0), _C_LABEL(Xresume_xenev0) - .long entry_xenev, _C_LABEL(Xrecurse_xenev1) ,_C_LABEL(Xresume_xenev1) - .long entry_xenev, _C_LABEL(Xrecurse_xenev2) ,_C_LABEL(Xresume_xenev2) - .long entry_xenev, _C_LABEL(Xrecurse_xenev3) ,_C_LABEL(Xresume_xenev3) - .long entry_xenev, _C_LABEL(Xrecurse_xenev4) ,_C_LABEL(Xresume_xenev4) - .long entry_xenev, _C_LABEL(Xrecurse_xenev5) ,_C_LABEL(Xresume_xenev5) - .long entry_xenev, _C_LABEL(Xrecurse_xenev6) ,_C_LABEL(Xresume_xenev6) - .long entry_xenev, _C_LABEL(Xrecurse_xenev7) ,_C_LABEL(Xresume_xenev7) - .long entry_xenev, _C_LABEL(Xrecurse_xenev8) ,_C_LABEL(Xresume_xenev8) - .long entry_xenev, _C_LABEL(Xrecurse_xenev9) ,_C_LABEL(Xresume_xenev9) - .long entry_xenev, _C_LABEL(Xrecurse_xenev10), 
_C_LABEL(Xresume_xenev10) - .long entry_xenev, _C_LABEL(Xrecurse_xenev11), _C_LABEL(Xresume_xenev11) - .long entry_xenev, _C_LABEL(Xrecurse_xenev12), _C_LABEL(Xresume_xenev12) - .long entry_xenev, _C_LABEL(Xrecurse_xenev13), _C_LABEL(Xresume_xenev13) - .long entry_xenev, _C_LABEL(Xrecurse_xenev14), _C_LABEL(Xresume_xenev14) - .long entry_xenev, _C_LABEL(Xrecurse_xenev15), _C_LABEL(Xresume_xenev15) - .long entry_xenev, _C_LABEL(Xrecurse_xenev16), _C_LABEL(Xresume_xenev16) - .long entry_xenev, _C_LABEL(Xrecurse_xenev17), _C_LABEL(Xresume_xenev17) - .long entry_xenev, _C_LABEL(Xrecurse_xenev18), _C_LABEL(Xresume_xenev18) - .long entry_xenev, _C_LABEL(Xrecurse_xenev19), _C_LABEL(Xresume_xenev19) - .long entry_xenev, _C_LABEL(Xrecurse_xenev20), _C_LABEL(Xresume_xenev20) - .long entry_xenev, _C_LABEL(Xrecurse_xenev21), _C_LABEL(Xresume_xenev21) - .long entry_xenev, _C_LABEL(Xrecurse_xenev22), _C_LABEL(Xresume_xenev22) - .long entry_xenev, _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23) - .long entry_xenev, _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24) - .long entry_xenev, _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25) - .long entry_xenev, _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26) - .long entry_xenev, _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27) - .long entry_xenev, _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28) - .long entry_xenev, _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29) - .long entry_xenev, _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30) - .long entry_xenev, _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31) + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_VM) ; + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_SCHED) ; + XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_HIGH) ; END(xenev_stubs) #endif /* XEN */ Index: src/sys/arch/x86/include/cpu.h diff -u src/sys/arch/x86/include/cpu.h:1.117.4.3 src/sys/arch/x86/include/cpu.h:1.117.4.4 --- src/sys/arch/x86/include/cpu.h:1.117.4.3 Sat Apr 11 10:11:31 2020 +++ 
src/sys/arch/x86/include/cpu.h Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu.h,v 1.117.4.3 2020/04/11 10:11:31 bouyer Exp $ */ +/* $NetBSD: cpu.h,v 1.117.4.4 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 1990 The Regents of the University of California. @@ -133,12 +133,6 @@ struct cpu_info { int ci_kfpu_spl; struct intrsource *ci_isources[MAX_INTR_SOURCES]; -#if defined(XEN) - struct intrsource *ci_xsources[NIPL]; - uint32_t ci_xmask[NIPL]; - uint32_t ci_xunmask[NIPL]; - uint32_t ci_xpending; /* XEN doesn't use the cmpxchg8 path */ -#endif volatile int ci_mtx_count; /* Negative count of spin mutexes */ volatile int ci_mtx_oldspl; /* Old SPL at this ci_idepth */ Index: src/sys/arch/x86/include/intrdefs.h diff -u src/sys/arch/x86/include/intrdefs.h:1.23 src/sys/arch/x86/include/intrdefs.h:1.23.6.1 --- src/sys/arch/x86/include/intrdefs.h:1.23 Sat Nov 23 19:40:37 2019 +++ src/sys/arch/x86/include/intrdefs.h Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: intrdefs.h,v 1.23 2019/11/23 19:40:37 ad Exp $ */ +/* $NetBSD: intrdefs.h,v 1.23.6.1 2020/04/12 17:25:52 bouyer Exp $ */ #ifndef _X86_INTRDEFS_H_ #define _X86_INTRDEFS_H_ @@ -40,6 +40,11 @@ #define SIR_BIO 26 #define SIR_CLOCK 25 #define SIR_PREEMPT 24 +#define SIR_XENIPL_HIGH 23 +#define SIR_XENIPL_SCHED 22 +#define SIR_XENIPL_VM 21 + +#define XEN_IPL2SIR(ipl) ((ipl) + (SIR_XENIPL_VM - IPL_VM)) /* * Maximum # of interrupt sources per CPU. 32 to fit in one word. Index: src/sys/arch/x86/isa/isa_machdep.c diff -u src/sys/arch/x86/isa/isa_machdep.c:1.44 src/sys/arch/x86/isa/isa_machdep.c:1.44.10.1 --- src/sys/arch/x86/isa/isa_machdep.c:1.44 Mon Feb 11 14:59:33 2019 +++ src/sys/arch/x86/isa/isa_machdep.c Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: isa_machdep.c,v 1.44 2019/02/11 14:59:33 cherry Exp $ */ +/* $NetBSD: isa_machdep.c,v 1.44.10.1 2020/04/12 17:25:52 bouyer Exp $ */ /*- * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 
@@ -65,7 +65,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.44 2019/02/11 14:59:33 cherry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.44.10.1 2020/04/12 17:25:52 bouyer Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -142,11 +142,7 @@ isa_intr_alloc(isa_chipset_tag_t ic, int for (i = 0; i < NUM_LEGACY_IRQS; i++) { if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0) continue; -#if !defined(XENPV) isp = ci->ci_isources[i]; -#else - isp = ci->ci_xsources[i]; -#endif if (isp == NULL) { /* if nothing's using the irq, just return it */ *irq = i; Index: src/sys/arch/x86/x86/i8259.c diff -u src/sys/arch/x86/x86/i8259.c:1.23 src/sys/arch/x86/x86/i8259.c:1.23.10.1 --- src/sys/arch/x86/x86/i8259.c:1.23 Mon Feb 11 14:59:33 2019 +++ src/sys/arch/x86/x86/i8259.c Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: i8259.c,v 1.23 2019/02/11 14:59:33 cherry Exp $ */ +/* $NetBSD: i8259.c,v 1.23.10.1 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright 2002 (c) Wasabi Systems, Inc. 
@@ -70,7 +70,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.23 2019/02/11 14:59:33 cherry Exp $"); +__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.23.10.1 2020/04/12 17:25:52 bouyer Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -233,21 +233,13 @@ i8259_reinit_irqs(void) { int irqs, irq; struct cpu_info *ci = &cpu_info_primary; -#if !defined(XENPV) const size_t array_count = __arraycount(ci->ci_isources); -#else - const size_t array_count = __arraycount(ci->ci_xsources); -#endif const size_t array_len = MIN(array_count, NUM_LEGACY_IRQS); irqs = 0; for (irq = 0; irq < array_len; irq++) -#if !defined(XENPV) if (ci->ci_isources[irq] != NULL) -#else - if (ci->ci_xsources[irq] != NULL) -#endif irqs |= 1 << irq; if (irqs >= 0x100) /* any IRQs >= 8 in use */ irqs |= 1 << IRQ_SLAVE; Index: src/sys/arch/x86/x86/intr.c diff -u src/sys/arch/x86/x86/intr.c:1.150.6.1 src/sys/arch/x86/x86/intr.c:1.150.6.2 --- src/sys/arch/x86/x86/intr.c:1.150.6.1 Sat Apr 11 18:26:07 2020 +++ src/sys/arch/x86/x86/intr.c Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: intr.c,v 1.150.6.1 2020/04/11 18:26:07 bouyer Exp $ */ +/* $NetBSD: intr.c,v 1.150.6.2 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. 
@@ -133,7 +133,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.150.6.1 2020/04/11 18:26:07 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.150.6.2 2020/04/12 17:25:52 bouyer Exp $"); #include "opt_intrdebug.h" #include "opt_multiprocessor.h" @@ -1372,13 +1372,6 @@ cpu_intr_init(struct cpu_info *ci) #endif ci->ci_idepth = -1; - -#ifdef XENPVHVM - ci->ci_xunmask[0] = 0xfffffffe; - for (int i = 1; i < NIPL; i++) - ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i); -#endif - } #if defined(INTRDEBUG) || defined(DDB) Index: src/sys/arch/xen/include/hypervisor.h diff -u src/sys/arch/xen/include/hypervisor.h:1.49 src/sys/arch/xen/include/hypervisor.h:1.49.10.1 --- src/sys/arch/xen/include/hypervisor.h:1.49 Mon Feb 4 18:14:53 2019 +++ src/sys/arch/xen/include/hypervisor.h Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: hypervisor.h,v 1.49 2019/02/04 18:14:53 cherry Exp $ */ +/* $NetBSD: hypervisor.h,v 1.49.10.1 2020/04/12 17:25:52 bouyer Exp $ */ /* * Copyright (c) 2006 Manuel Bouyer. 
@@ -170,7 +170,7 @@ void hypervisor_send_event(struct cpu_in void hypervisor_unmask_event(unsigned int); void hypervisor_mask_event(unsigned int); void hypervisor_clear_event(unsigned int); -void hypervisor_enable_ipl(unsigned int); +void hypervisor_enable_sir(unsigned int); void hypervisor_set_ipending(uint32_t, int, int); void hypervisor_machdep_attach(void); void hypervisor_machdep_resume(void); Index: src/sys/arch/xen/include/intr.h diff -u src/sys/arch/xen/include/intr.h:1.53 src/sys/arch/xen/include/intr.h:1.53.6.1 --- src/sys/arch/xen/include/intr.h:1.53 Mon Dec 23 13:35:37 2019 +++ src/sys/arch/xen/include/intr.h Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: intr.h,v 1.53 2019/12/23 13:35:37 thorpej Exp $ */ +/* $NetBSD: intr.h,v 1.53.6.1 2020/04/12 17:25:52 bouyer Exp $ */ /* NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp */ /*- @@ -62,9 +62,6 @@ struct evtsource { char ev_xname[64]; /* handler device list */ }; -#define XMASK(ci,level) (ci)->ci_xmask[(level)] -#define XUNMASK(ci,level) (ci)->ci_xunmask[(level)] - extern struct intrstub xenev_stubs[]; extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invaid */ Index: src/sys/arch/xen/x86/hypervisor_machdep.c diff -u src/sys/arch/xen/x86/hypervisor_machdep.c:1.36 src/sys/arch/xen/x86/hypervisor_machdep.c:1.36.8.1 --- src/sys/arch/xen/x86/hypervisor_machdep.c:1.36 Thu May 9 17:09:51 2019 +++ src/sys/arch/xen/x86/hypervisor_machdep.c Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: hypervisor_machdep.c,v 1.36 2019/05/09 17:09:51 bouyer Exp $ */ +/* $NetBSD: hypervisor_machdep.c,v 1.36.8.1 2020/04/12 17:25:52 bouyer Exp $ */ /* * @@ -54,7 +54,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36 2019/05/09 17:09:51 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36.8.1 2020/04/12 17:25:52 bouyer Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -203,14 +203,6 @@ stipending(void) x86_enable_intr(); } -#if 0 - 
if (ci->ci_xpending & 0x1) - printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n", - HYPERVISOR_shared_info->events, - HYPERVISOR_shared_info->events_mask, ci->ci_ilevel, - ci->ci_xpending); -#endif - return (ret); } @@ -290,7 +282,7 @@ do_hypervisor_callback(struct intrframe if (level != ci->ci_ilevel) printf("hypervisor done %08x level %d/%d ipending %08x\n", (uint)vci->evtchn_pending_sel, - level, ci->ci_ilevel, ci->ci_xpending); + level, ci->ci_ilevel, ci->ci_ipending); #endif } @@ -384,7 +376,7 @@ evt_enable_event(unsigned int port, unsi } void -hypervisor_enable_ipl(unsigned int ipl) +hypervisor_enable_sir(unsigned int sir) { struct cpu_info *ci = curcpu(); @@ -394,37 +386,37 @@ hypervisor_enable_ipl(unsigned int ipl) * we know that all callback for this event have been processed. */ - evt_iterate_bits(&ci->ci_xsources[ipl]->ipl_evt_mask1, - ci->ci_xsources[ipl]->ipl_evt_mask2, NULL, + evt_iterate_bits(&ci->ci_isources[sir]->ipl_evt_mask1, + ci->ci_isources[sir]->ipl_evt_mask2, NULL, evt_enable_event, NULL); } void -hypervisor_set_ipending(uint32_t iplmask, int l1, int l2) +hypervisor_set_ipending(uint32_t imask, int l1, int l2) { /* This function is not re-entrant */ KASSERT(x86_read_psl() != 0); - int ipl; + int sir; struct cpu_info *ci = curcpu(); /* set pending bit for the appropriate IPLs */ - ci->ci_xpending |= iplmask; + ci->ci_ipending |= imask; /* * And set event pending bit for the lowest IPL. 
As IPL are handled * from high to low, this ensure that all callbacks will have been * called when we ack the event */ - ipl = ffs(iplmask); - KASSERT(ipl > 0); - ipl--; - KASSERT(ipl < NIPL); - KASSERT(ci->ci_xsources[ipl] != NULL); - ci->ci_xsources[ipl]->ipl_evt_mask1 |= 1UL << l1; - ci->ci_xsources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2; + sir = ffs(imask); + KASSERT(sir > SIR_XENIPL_VM); + sir--; + KASSERT(sir <= SIR_XENIPL_HIGH); + KASSERT(ci->ci_isources[sir] != NULL); + ci->ci_isources[sir]->ipl_evt_mask1 |= 1UL << l1; + ci->ci_isources[sir]->ipl_evt_mask2[l1] |= 1UL << l2; if (__predict_false(ci != curcpu())) { if (xen_send_ipi(ci, XEN_IPI_HVCB)) { panic("hypervisor_set_ipending: " Index: src/sys/arch/xen/x86/xen_intr.c diff -u src/sys/arch/xen/x86/xen_intr.c:1.21.2.1 src/sys/arch/xen/x86/xen_intr.c:1.21.2.2 --- src/sys/arch/xen/x86/xen_intr.c:1.21.2.1 Sat Apr 11 10:11:31 2020 +++ src/sys/arch/xen/x86/xen_intr.c Sun Apr 12 17:25:52 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: xen_intr.c,v 1.21.2.1 2020/04/11 10:11:31 bouyer Exp $ */ +/* $NetBSD: xen_intr.c,v 1.21.2.2 2020/04/12 17:25:52 bouyer Exp $ */ /*- * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.21.2.1 2020/04/11 10:11:31 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.21.2.2 2020/04/12 17:25:52 bouyer Exp $"); #include "opt_multiprocessor.h" @@ -306,12 +306,6 @@ void xen_cpu_intr_init(struct cpu_info * void xen_cpu_intr_init(struct cpu_info *ci) { - int i; /* XXX: duplicate */ - - ci->ci_xunmask[0] = 0xfffffffe; - for (i = 1; i < NIPL; i++) - ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i); - #if defined(INTRSTACKSIZE) vaddr_t istack; @@ -338,7 +332,7 @@ xen_cpu_intr_init(struct cpu_info *ci) #endif #ifdef MULTIPROCESSOR - for (i = 0; i < XEN_NIPIS; i++) + for (int i = 0; i < XEN_NIPIS; i++) evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, NULL, device_xname(ci->ci_dev), xen_ipi_names[i]); #endif Index: src/sys/arch/xen/xen/clock.c diff -u src/sys/arch/xen/xen/clock.c:1.80.6.1 src/sys/arch/xen/xen/clock.c:1.80.6.2 --- src/sys/arch/xen/xen/clock.c:1.80.6.1 Sat Apr 11 18:26:07 2020 +++ src/sys/arch/xen/xen/clock.c Sun Apr 12 17:25:53 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: clock.c,v 1.80.6.1 2020/04/11 18:26:07 bouyer Exp $ */ +/* $NetBSD: clock.c,v 1.80.6.2 2020/04/12 17:25:53 bouyer Exp $ */ /*- * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc. 
@@ -36,7 +36,7 @@ #endif #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.80.6.1 2020/04/11 18:26:07 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.80.6.2 2020/04/12 17:25:53 bouyer Exp $"); #include <sys/param.h> #include <sys/types.h> @@ -162,7 +162,6 @@ void idle_block(void) { - KASSERT(curcpu()->ci_xpending == 0); HYPERVISOR_block(); } Index: src/sys/arch/xen/xen/evtchn.c diff -u src/sys/arch/xen/xen/evtchn.c:1.88.2.2 src/sys/arch/xen/xen/evtchn.c:1.88.2.3 --- src/sys/arch/xen/xen/evtchn.c:1.88.2.2 Sun Apr 12 11:16:58 2020 +++ src/sys/arch/xen/xen/evtchn.c Sun Apr 12 17:25:53 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: evtchn.c,v 1.88.2.2 2020/04/12 11:16:58 bouyer Exp $ */ +/* $NetBSD: evtchn.c,v 1.88.2.3 2020/04/12 17:25:53 bouyer Exp $ */ /* * Copyright (c) 2006 Manuel Bouyer. @@ -54,7 +54,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.2 2020/04/12 11:16:58 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.3 2020/04/12 17:25:53 bouyer Exp $"); #include "opt_xen.h" #include "isa.h" @@ -372,7 +372,7 @@ evtchn_do_event(int evtch, struct intrfr while (ih != NULL) { if (ih->ih_cpu != ci) { hypervisor_send_event(ih->ih_cpu, evtch); - iplmask &= ~XUNMASK(ci, ih->ih_level); + iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); ih = ih->ih_evt_next; continue; } @@ -388,7 +388,7 @@ evtchn_do_event(int evtch, struct intrfr mutex_spin_exit(&evtlock[evtch]); goto splx; } - iplmask &= ~XUNMASK(ci, ih->ih_level); + iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); ci->ci_ilevel = ih->ih_level; ih_fun = (void *)ih->ih_fun; ih_fun(ih->ih_arg, regs); @@ -790,7 +790,7 @@ intr_calculatemasks(struct evtsource *ev for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) { if (ih->ih_level > evts->ev_maxlevel) evts->ev_maxlevel = ih->ih_level; - evts->ev_imask |= (1 << ih->ih_level); + evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level)); if (ih->ih_cpu == ci) cpu_receive = 1; } @@ -904,19 +904,24 @@ event_set_iplhandler(struct 
cpu_info *ci int level) { struct intrsource *ipls; + int sir = XEN_IPL2SIR(level); + KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH); KASSERT(ci == ih->ih_cpu); - if (ci->ci_xsources[level] == NULL) { + if (ci->ci_isources[sir] == NULL) { ipls = kmem_zalloc(sizeof (struct intrsource), KM_NOSLEEP); if (ipls == NULL) panic("can't allocate fixed interrupt source"); - ipls->is_recurse = xenev_stubs[level].ist_recurse; - ipls->is_resume = xenev_stubs[level].ist_resume; + ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse; + ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume; ipls->is_handlers = ih; - ci->ci_xsources[level] = ipls; + ipls->is_maxlevel = level; + ipls->is_pic = &xen_pic; + ci->ci_isources[sir] = ipls; + x86_intr_calculatemasks(ci); } else { - ipls = ci->ci_xsources[level]; + ipls = ci->ci_isources[sir]; ih->ih_next = ipls->is_handlers; ipls->is_handlers = ih; } @@ -949,7 +954,9 @@ event_remove_handler(int evtch, int (*fu ci = ih->ih_cpu; *ihp = ih->ih_evt_next; - ipls = ci->ci_xsources[ih->ih_level]; + int sir = XEN_IPL2SIR(ih->ih_level); + KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH); + ipls = ci->ci_isources[sir]; for (ihp = &ipls->is_handlers, ih = ipls->is_handlers; ih != NULL; ihp = &ih->ih_next, ih = ih->ih_next) { @@ -1014,7 +1021,7 @@ xen_debug_handler(void *arg) struct cpu_info *ci = curcpu(); int i; int xci_ilevel = ci->ci_ilevel; - int xci_xpending = ci->ci_xpending; + int xci_ipending = ci->ci_ipending; int xci_idepth = ci->ci_idepth; u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; @@ -1031,8 +1038,8 @@ xen_debug_handler(void *arg) __insn_barrier(); printf("debug event\n"); - printf("ci_ilevel 0x%x ci_xpending 0x%x ci_idepth %d\n", - xci_ilevel, xci_xpending, xci_idepth); + printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", + xci_ilevel, xci_ipending, xci_idepth); printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" " 
evtchn_pending_sel 0x%lx\n", upcall_pending, upcall_mask, pending_sel); Index: src/sys/arch/xen/xen/xenevt.c diff -u src/sys/arch/xen/xen/xenevt.c:1.56 src/sys/arch/xen/xen/xenevt.c:1.56.2.1 --- src/sys/arch/xen/xen/xenevt.c:1.56 Tue Apr 7 10:19:53 2020 +++ src/sys/arch/xen/xen/xenevt.c Sun Apr 12 17:25:53 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: xenevt.c,v 1.56 2020/04/07 10:19:53 jdolecek Exp $ */ +/* $NetBSD: xenevt.c,v 1.56.2.1 2020/04/12 17:25:53 bouyer Exp $ */ /* * Copyright (c) 2005 Manuel Bouyer. @@ -26,7 +26,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.56 2020/04/07 10:19:53 jdolecek Exp $"); +__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.56.2.1 2020/04/12 17:25:53 bouyer Exp $"); #include "opt_xen.h" #include <sys/param.h> @@ -193,7 +193,7 @@ xenevt_setipending(int l1, int l2) { atomic_or_ulong(&xenevt_ev1, 1UL << l1); atomic_or_ulong(&xenevt_ev2[l1], 1UL << l2); - atomic_or_32(&cpu_info_primary.ci_xpending, 1 << IPL_HIGH); + atomic_or_32(&cpu_info_primary.ci_ipending, 1 << SIR_XENIPL_HIGH); } /* process pending events */