Module Name:    src
Committed By:   ad
Date:           Sun Dec  1 12:19:28 UTC 2019

Modified Files:
        src/sys/arch/sh3/include: userret.h
        src/sys/arch/sh3/sh3: exception.c exception_vector.S

Log Message:
sh3: make ASTs work as expected, and fix a few things in the TLB refill path.
With help from uwe@ and martin@.


To generate a diff of this commit:
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/sh3/include/userret.h
cvs rdiff -u -r1.70 -r1.71 src/sys/arch/sh3/sh3/exception.c
cvs rdiff -u -r1.50 -r1.51 src/sys/arch/sh3/sh3/exception_vector.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/sh3/include/userret.h
diff -u src/sys/arch/sh3/include/userret.h:1.16 src/sys/arch/sh3/include/userret.h:1.17
--- src/sys/arch/sh3/include/userret.h:1.16	Sat Nov 30 15:53:36 2019
+++ src/sys/arch/sh3/include/userret.h	Sun Dec  1 12:19:28 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: userret.h,v 1.16 2019/11/30 15:53:36 ad Exp $	*/
+/*	$NetBSD: userret.h,v 1.17 2019/12/01 12:19:28 ad Exp $	*/
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -51,6 +51,14 @@ static __inline void
 userret(struct lwp *l)
 {
 
+	/* This must come first... */
+	l->l_md.md_astpending = 0;
+
+	if (l->l_pflag & LP_OWEUPC) {
+		l->l_pflag &= ~LP_OWEUPC;
+		ADDUPROF(l);
+	}
+
 	/* Invoke MI userret code */
 	mi_userret(l);
 

Index: src/sys/arch/sh3/sh3/exception.c
diff -u src/sys/arch/sh3/sh3/exception.c:1.70 src/sys/arch/sh3/sh3/exception.c:1.71
--- src/sys/arch/sh3/sh3/exception.c:1.70	Sat Nov 30 15:53:36 2019
+++ src/sys/arch/sh3/sh3/exception.c	Sun Dec  1 12:19:28 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: exception.c,v 1.70 2019/11/30 15:53:36 ad Exp $	*/
+/*	$NetBSD: exception.c,v 1.71 2019/12/01 12:19:28 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc. All rights reserved.
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
  *
@@ -79,7 +79,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: exception.c,v 1.70 2019/11/30 15:53:36 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: exception.c,v 1.71 2019/12/01 12:19:28 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -89,6 +89,7 @@ __KERNEL_RCSID(0, "$NetBSD: exception.c,
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/signal.h>
+#include <sys/intr.h>
 
 #ifdef DDB
 #include <sh3/db_machdep.h>
@@ -295,12 +296,9 @@ tlb_exception(struct lwp *l, struct trap
 			}				\
 		} while(/*CONSTCOND*/0)
 
-	splx(tf->tf_ssr & PSL_IMASK);
-
 	usermode = !KERNELMODE(tf->tf_ssr);
 	if (usermode) {
 		KDASSERT(l->l_md.md_regs == tf);
-		LWP_CACHE_CREDS(l, l->l_proc);
 	} else {
 #if 0 /* FIXME: probably wrong for yamt-idlelwp */
 		KDASSERT(l == NULL ||		/* idle */
@@ -330,6 +328,8 @@ tlb_exception(struct lwp *l, struct trap
 			ksi.ksi_signo = SIGSEGV;
 			ksi.ksi_code = SEGV_ACCERR;
 			ksi.ksi_addr = (void *)va;
+			splx(tf->tf_ssr & PSL_IMASK);
+			LWP_CACHE_CREDS(l, l->l_proc);
 			goto user_fault;
 		} else {
 			TLB_ASSERT(l && onfault != NULL,
@@ -372,12 +372,12 @@ tlb_exception(struct lwp *l, struct trap
 
 	/* Lookup page table. if entry found, load it. */
 	if (track && __pmap_pte_load(pmap, va, track)) {
-		if (usermode)
-			userret(l);
 		return;
 	}
 
 	/* Page not found. call fault handler */
+	splx(tf->tf_ssr & PSL_IMASK);
+	LWP_CACHE_CREDS(l, l->l_proc);
 	pcb->pcb_onfault = NULL;
 	err = uvm_fault(map, va, ftype);
 	pcb->pcb_onfault = onfault;
@@ -399,10 +399,21 @@ tlb_exception(struct lwp *l, struct trap
 
 	/* Page in. load PTE to TLB. */
 	if (err == 0) {
-		bool loaded = __pmap_pte_load(pmap, va, track);
+		bool loaded;
+		userret(l);
+		loaded = __pmap_pte_load(pmap, va, track);
+#if 0
+		/*
+		 * XXXAD I don't think you should do this - consider
+		 * a multithreaded program where another thread got
+		 * switched to during UVM fault and it unmapped the
+		 * page. I think you should just let the fault happen
+		 * again.
+		 */
 		TLB_ASSERT(loaded, "page table entry not found");
-		if (usermode)
-			userret(l);
+#else
+		__USE(loaded);
+#endif
 		return;
 	}
 
@@ -441,7 +452,6 @@ tlb_exception(struct lwp *l, struct trap
 	ksi.ksi_trap = tf->tf_expevt;
 	trapsignal(l, &ksi);
 	userret(l);
-	ast(l, tf);
 	return;
 
  tlb_panic:
@@ -459,27 +469,29 @@ tlb_exception(struct lwp *l, struct trap
  *	tf ... full user context.
  *	This is called when exception return. if return from kernel to user,
  *	handle asynchronous software traps and context switch if needed.
+ *	Interrupts are blocked on entry.
  */
 void
 ast(struct lwp *l, struct trapframe *tf)
 {
+	int s;
 
-	if (KERNELMODE(tf->tf_ssr)) {
+	if (__predict_true(l->l_md.md_astpending == 0)) {
+		return;
+	}
+	if (__predict_false(KERNELMODE(tf->tf_ssr))) {
+		/* should not occur but leave it here to be safe */
 		return;
 	}
 
 	KDASSERT(l != NULL);
 	KDASSERT(l->l_md.md_regs == tf);
 
-	while (l->l_md.md_astpending) {
-		//curcpu()->ci_data.cpu_nast++;
-		l->l_md.md_astpending = 0;
-
-		if (l->l_pflag & LP_OWEUPC) {
-			l->l_pflag &= ~LP_OWEUPC;
-			ADDUPROF(l);
-		}
-
+	s = tf->tf_ssr & PSL_IMASK;
+	do {
+		splx(s);
+		/* userret() clears l_md.md_astpending */
 		userret(l);
-	}
+		s = splhigh();
+	} while (__predict_false(l->l_md.md_astpending));
 }

Index: src/sys/arch/sh3/sh3/exception_vector.S
diff -u src/sys/arch/sh3/sh3/exception_vector.S:1.50 src/sys/arch/sh3/sh3/exception_vector.S:1.51
--- src/sys/arch/sh3/sh3/exception_vector.S:1.50	Wed Nov  2 00:11:59 2016
+++ src/sys/arch/sh3/sh3/exception_vector.S	Sun Dec  1 12:19:28 2019
@@ -1,7 +1,7 @@
-/*	$NetBSD: exception_vector.S,v 1.50 2016/11/02 00:11:59 pgoyette Exp $	*/
+/*	$NetBSD: exception_vector.S,v 1.51 2019/12/01 12:19:28 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002 The NetBSD Foundation, Inc.
+ * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
 #define _ALIGN_TEXT	.align 5
 #include <sh3/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.50 2016/11/02 00:11:59 pgoyette Exp $")
+__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.51 2019/12/01 12:19:28 ad Exp $")
 
 
 /*
@@ -86,18 +86,25 @@ NENTRY(sh_vector_generic)
 	/* Check TLB exception or not */
 	mov.l	.Lg_TLB_PROT_ST, r1
 	cmp/hi	r1, r0
-	bt	1f
+	bt/s	1f
+	 mov	r4, r8	/* preserve curlwp across call */
 
 	/* tlb_exception(curlwp, tf, TEA); */
 	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
 	mov.l	.Lg_tlb_exception, r0
 	jsr	@r0
 	 mov	r14, r5			/* 2nd arg */
+
+	/* Check for ASTs on exit to user mode. */
+	__INTR_MASK(r0, r1)
+	mov.l	.Lg_ast, r0
+	mov	r8, r4
+	jsr	@r0
+	 mov	r14, r5
 	bra	.Lg_return_from_exception
 	 nop
 
-	/* general_exception(curlwp, tf, TEA); */
-1:	mov	r4, r8
+1:	/* general_exception(curlwp, tf, TEA); */
 #if defined(PTRACE_HOOKS) || defined(DDB)
 	mov	#0, r2
 	MOV	(BBRA, r1)
@@ -110,8 +117,9 @@ NENTRY(sh_vector_generic)
 	 mov	r14, r5			/* 2nd arg */
 
 	/* Check for ASTs on exit to user mode. */
-	mov	r8, r4
+	__INTR_MASK(r0, r1)
 	mov.l	.Lg_ast, r0
+	mov	r8, r4
 	jsr	@r0
 	 mov	r14, r5
 
@@ -271,9 +279,17 @@ NENTRY(sh3_vector_tlbmiss)
 	mov.l	@r1, r4			! arg1: curlwp
 	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
 	mov.l	.L3_tlb_exception, r0
+	mov	r4, r8			! save curlwp across the call
 	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
 	jsr	@r0
 	 mov	r14, r5			! arg2: trapframe
+
+	/* Check for ASTs on exit to user mode. */
+	__INTR_MASK(r0, r1)
+	mov.l	.L3_ast, r0
+	mov	r8, r4			! arg1: curlwp
+	jsr	@r0
+	 mov	r14, r5			! arg2: trapframe
 	__EXCEPTION_RETURN
 
 	.align	4
@@ -289,6 +305,7 @@ NENTRY(sh3_vector_tlbmiss)
 .L3_SH3_EXPEVT:			.long	SH3_EXPEVT
 .L3_curlwp:			.long	_C_LABEL(curlwp)
 .L3_tlb_exception:		.long	_C_LABEL(tlb_exception)
+.L3_ast:			.long	_C_LABEL(ast)
 
 /* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
 VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
@@ -421,9 +438,17 @@ NENTRY(sh4_vector_tlbmiss)
 	mov.l	@r1, r4			! arg1: curlwp
 	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
 	mov.l	.L4_tlb_exception, r0
+	mov	r4, r8			! save curlwp across the call
 	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
 	jsr	@r0
 	 mov	r14, r5			! arg2: trapframe
+
+	/* Check for ASTs on exit to user mode. */
+	__INTR_MASK(r0, r1)
+	mov.l	.L4_ast, r0
+	mov	r8, r4			! arg1: curlwp
+	jsr	@r0
+	 mov	r14, r5			! arg2: trapframe
 	__EXCEPTION_RETURN
 
 	.align	5
@@ -439,6 +464,7 @@ NENTRY(sh4_vector_tlbmiss)
 .L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
 .L4_curlwp:			.long	_C_LABEL(curlwp)
 .L4_tlb_exception:		.long	_C_LABEL(tlb_exception)
+.L4_ast:			.long	_C_LABEL(ast)
 
 /* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
 VECTOR_END_MARKER(sh4_vector_tlbmiss_end)

Reply via email to the sender.