Module Name:    src
Committed By:   matt
Date:           Sun Aug 23 03:38:19 UTC 2009

Modified Files:
        src/sys/arch/mips/include [matt-nb5-mips64]: types.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: mips_machdep.c
            process_machdep.c
        src/sys/kern [matt-nb5-mips64]: core_elf32.c sys_process.c
        src/sys/sys [matt-nb5-mips64]: ptrace.h

Log Message:
Change how lazy fp load/save is done.  fpcurlwp is never NULL.
If no current lwp has the FP, then fpcurlwp is set to lwp0.
This eliminates many checks for NULL and avoids a few null-derefs.
Since savefpregs clears COP1, loadfpregs can be called to reload
fpregs.  If it notices that situation, it just sets COP1 and returns.
Save does not reset fpcurlwp, just clears COP1.  Load does set fpcurlwp.

If MIPS3_SR_FR is set, all 32 64-bit FP registers are saved/restored via Xdc1.
If MIPS3_SR_FR is clear, only 32 32-bit FP registers are saved/restored via Xwc1.
This preserves the existing ABI.


To generate a diff of this commit:
cvs rdiff -u -r1.43.36.2 -r1.43.36.3 src/sys/arch/mips/include/types.h
cvs rdiff -u -r1.205.4.1.2.1.2.2 -r1.205.4.1.2.1.2.3 \
    src/sys/arch/mips/mips/mips_machdep.c
cvs rdiff -u -r1.29.62.1 -r1.29.62.2 src/sys/arch/mips/mips/process_machdep.c
cvs rdiff -u -r1.32.16.1 -r1.32.16.2 src/sys/kern/core_elf32.c
cvs rdiff -u -r1.143.4.1 -r1.143.4.1.4.1 src/sys/kern/sys_process.c
cvs rdiff -u -r1.40 -r1.40.28.1 src/sys/sys/ptrace.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/include/types.h
diff -u src/sys/arch/mips/include/types.h:1.43.36.2 src/sys/arch/mips/include/types.h:1.43.36.3
--- src/sys/arch/mips/include/types.h:1.43.36.2	Fri Aug 21 17:29:42 2009
+++ src/sys/arch/mips/include/types.h	Sun Aug 23 03:38:19 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.43.36.2 2009/08/21 17:29:42 matt Exp $	*/
+/*	$NetBSD: types.h,v 1.43.36.3 2009/08/23 03:38:19 matt Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -110,6 +110,7 @@
 
 #define	__HAVE_AST_PERPROC
 #define	__HAVE_SYSCALL_INTERN
+#define	__HAVE_PROCESS_XFPREGS
 #ifdef MIPS3_PLUS	/* XXX bogus! */
 #define	__HAVE_CPU_COUNTER
 #endif

Index: src/sys/arch/mips/mips/mips_machdep.c
diff -u src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.2 src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.3
--- src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.2	Fri Aug 21 17:48:57 2009
+++ src/sys/arch/mips/mips/mips_machdep.c	Sun Aug 23 03:38:19 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.2 2009/08/21 17:48:57 matt Exp $	*/
+/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.3 2009/08/23 03:38:19 matt Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -112,7 +112,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.2 2009/08/21 17:48:57 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.3 2009/08/23 03:38:19 matt Exp $");
 
 #include "opt_cputype.h"
 
@@ -783,6 +783,7 @@
 	 */
 	lwp0.l_cpu = &cpu_info_store;
 	cpu_info_store.ci_curlwp = &lwp0;
+	cpu_info_store.ci_fpcurlwp = &lwp0;
 	curlwp = &lwp0;
 
 	mycpu = NULL;
@@ -1119,6 +1120,15 @@
 	f->f_regs[_R_PC] = (int)pack->ep_entry & ~3;
 	f->f_regs[_R_T9] = (int)pack->ep_entry & ~3; /* abicall requirement */
 	f->f_regs[_R_SR] = PSL_USERSET;
+#if !defined(__mips_o32)
+	/*
+	 * allow 64bit ops in userland for non-O32 ABIs
+	 */
+	if (l->l_proc->p_md.md_abi != _MIPS_BSD_API_O32)
+		f->f_regs[_R_SR] |= MIPS_SR_UX;
+	if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi))
+		f->f_regs[_R_SR] |= MIPS3_SR_FR;
+#endif
 	/*
 	 * Set up arguments for _start():
 	 *	_start(stack, obj, cleanup, ps_strings);
@@ -1134,7 +1144,7 @@
 	f->f_regs[_R_A3] = (intptr_t)l->l_proc->p_psstr;
 
 	if ((l->l_md.md_flags & MDP_FPUSED) && l == fpcurlwp)
-		fpcurlwp = NULL;
+		fpcurlwp = &lwp0;
 	memset(&l->l_addr->u_pcb.pcb_fpregs, 0, sizeof(struct fpreg));
 	l->l_md.md_flags &= ~MDP_FPUSED;
 	l->l_md.md_ss_addr = 0;
@@ -1493,171 +1503,273 @@
 }
 
 void
-savefpregs(l)
-	struct lwp *l;
+savefpregs(struct lwp *l)
 {
 #ifndef NOFPU
-	u_int32_t status, fpcsr;
-	mips_fpreg_t *fp;
-	struct frame *f;
-
-	if (l == NULL)
+	struct frame * const f = l->l_md.md_regs;
+	mips_fpreg_t * const fp = l->l_addr->u_pcb.pcb_fpregs.r_regs;
+	uint32_t status, fpcsr;
+	
+	/*
+	 * Don't do anything if the FPU is already off.
+	 */
+	if ((f->f_regs[_R_SR] & MIPS_SR_COP_1_BIT) == 0)
 		return;
+
+	/*
+	 * this process yielded FPA.
+	 */
+	KASSERT(f->f_regs[_R_SR] & MIPS_SR_COP_1_BIT);	/* it should be on */
+
 	/*
 	 * turnoff interrupts enabling CP1 to read FPCSR register.
 	 */
 	__asm volatile (
-		".set noreorder					\n\t"
-		".set noat					\n\t"
-		"mfc0	%0, $" ___STRING(MIPS_COP_0_STATUS) "	\n\t"
-		"li	$1, %2					\n\t"
-		"mtc0	$1, $" ___STRING(MIPS_COP_0_STATUS) "	\n\t"
+		".set noreorder"				"\n\t"
+		".set noat"					"\n\t"
+		"mfc0	%0, $" ___STRING(MIPS_COP_0_STATUS) 	"\n\t"
+		"mtc0	%2, $" ___STRING(MIPS_COP_0_STATUS)	"\n\t"
 		___STRING(COP0_HAZARD_FPUENABLE)
-		"cfc1	%1, $31					\n\t"
-		"cfc1	%1, $31					\n\t"
-		".set reorder					\n\t"
+		"cfc1	%1, $31"				"\n\t"
+		"cfc1	%1, $31"				"\n\t"
+		".set reorder"					"\n\t"
 		".set at" 
-		: "=r" (status), "=r"(fpcsr) : "i"(MIPS_SR_COP_1_BIT));
+		: "=r" (status), "=r"(fpcsr)
+		: "r"(f->f_regs[_R_SR] & (MIPS_SR_COP_1_BIT|MIPS3_SR_FR)));
+
 	/*
-	 * this process yielded FPA.
+	 * Make sure we don't reenable FP when we return to userspace.
 	 */
-	f = l->l_md.md_regs;
-	f->f_regs[_R_SR] &= ~MIPS_SR_COP_1_BIT;
+	f->f_regs[_R_SR] ^= MIPS_SR_COP_1_BIT;
 
 	/*
 	 * save FPCSR and FP register values.
 	 */
-	fp = l->l_addr->u_pcb.pcb_fpregs.r_regs;
-	fp[32] = fpcsr;
-#if defined(__mips_o32) || defined(__mips_o64)
-#define	LXC1	"lwc1"
-#define	SXC1	"swc1"
-#endif
-#if defined(__mips_n32) || defined(__mips_n64)
-#define	LXC1	"ldc1"
-#define	SXC1	"sdc1"
-#endif
-	__asm volatile (
-		".set noreorder		;"
-		SXC1"	$f0, (0*%d1)(%0)	;"
-		SXC1"	$f1, (1*%d1)(%0)	;"
-		SXC1"	$f2, (2*%d1)(%0)	;"
-		SXC1"	$f3, (3*%d1)(%0)	;"
-		SXC1"	$f4, (4*%d1)(%0)	;"
-		SXC1"	$f5, (5*%d1)(%0)	;"
-		SXC1"	$f6, (6*%d1)(%0)	;"
-		SXC1"	$f7, (7*%d1)(%0)	;"
-		SXC1"	$f8, (8*%d1)(%0)	;"
-		SXC1"	$f9, (9*%d1)(%0)	;"
-		SXC1"	$f10, (10*%d1)(%0)	;"
-		SXC1"	$f11, (11*%d1)(%0)	;"
-		SXC1"	$f12, (12*%d1)(%0)	;"
-		SXC1"	$f13, (13*%d1)(%0)	;"
-		SXC1"	$f14, (14*%d1)(%0)	;"
-		SXC1"	$f15, (15*%d1)(%0)	;"
-		SXC1"	$f16, (16*%d1)(%0)	;"
-		SXC1"	$f17, (17*%d1)(%0)	;"
-		SXC1"	$f18, (18*%d1)(%0)	;"
-		SXC1"	$f19, (19*%d1)(%0)	;"
-		SXC1"	$f20, (20*%d1)(%0)	;"
-		SXC1"	$f21, (21*%d1)(%0)	;"
-		SXC1"	$f22, (22*%d1)(%0)	;"
-		SXC1"	$f23, (23*%d1)(%0)	;"
-		SXC1"	$f24, (24*%d1)(%0)	;"
-		SXC1"	$f25, (25*%d1)(%0)	;"
-		SXC1"	$f26, (26*%d1)(%0)	;"
-		SXC1"	$f27, (27*%d1)(%0)	;"
-		SXC1"	$f28, (28*%d1)(%0)	;"
-		SXC1"	$f29, (29*%d1)(%0)	;"
-		SXC1"	$f30, (30*%d1)(%0)	;"
-		SXC1"	$f31, (31*%d1)(%0)	;"
-		".set reorder" :: "r"(fp), "i"(sizeof(fp[0])));
+#if !defined(__mips_o32)
+	if (f->f_regs[_R_SR] & MIPS3_SR_FR) {
+		KASSERT(_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi));
+		fp[32] = fpcsr;
+		__asm volatile (
+			".set noreorder			;"
+			"sdc1	$f0, (0*%d1)(%0)	;"
+			"sdc1	$f1, (1*%d1)(%0)	;"
+			"sdc1	$f2, (2*%d1)(%0)	;"
+			"sdc1	$f3, (3*%d1)(%0)	;"
+			"sdc1	$f4, (4*%d1)(%0)	;"
+			"sdc1	$f5, (5*%d1)(%0)	;"
+			"sdc1	$f6, (6*%d1)(%0)	;"
+			"sdc1	$f7, (7*%d1)(%0)	;"
+			"sdc1	$f8, (8*%d1)(%0)	;"
+			"sdc1	$f9, (9*%d1)(%0)	;"
+			"sdc1	$f10, (10*%d1)(%0)	;"
+			"sdc1	$f11, (11*%d1)(%0)	;"
+			"sdc1	$f12, (12*%d1)(%0)	;"
+			"sdc1	$f13, (13*%d1)(%0)	;"
+			"sdc1	$f14, (14*%d1)(%0)	;"
+			"sdc1	$f15, (15*%d1)(%0)	;"
+			"sdc1	$f16, (16*%d1)(%0)	;"
+			"sdc1	$f17, (17*%d1)(%0)	;"
+			"sdc1	$f18, (18*%d1)(%0)	;"
+			"sdc1	$f19, (19*%d1)(%0)	;"
+			"sdc1	$f20, (20*%d1)(%0)	;"
+			"sdc1	$f21, (21*%d1)(%0)	;"
+			"sdc1	$f22, (22*%d1)(%0)	;"
+			"sdc1	$f23, (23*%d1)(%0)	;"
+			"sdc1	$f24, (24*%d1)(%0)	;"
+			"sdc1	$f25, (25*%d1)(%0)	;"
+			"sdc1	$f26, (26*%d1)(%0)	;"
+			"sdc1	$f27, (27*%d1)(%0)	;"
+			"sdc1	$f28, (28*%d1)(%0)	;"
+			"sdc1	$f29, (29*%d1)(%0)	;"
+			"sdc1	$f30, (30*%d1)(%0)	;"
+			"sdc1	$f31, (31*%d1)(%0)	;"
+			".set reorder" :: "r"(fp), "i"(sizeof(fp[0])));
+	} else
+#endif /* !defined(__mips_o32) */
+	{
+		KASSERT(!_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi));
+		((int *)fp)[32] = fpcsr;
+		__asm volatile (
+			".set noreorder			;"
+			"swc1	$f0, (0*%d1)(%0)	;"
+			"swc1	$f1, (1*%d1)(%0)	;"
+			"swc1	$f2, (2*%d1)(%0)	;"
+			"swc1	$f3, (3*%d1)(%0)	;"
+			"swc1	$f4, (4*%d1)(%0)	;"
+			"swc1	$f5, (5*%d1)(%0)	;"
+			"swc1	$f6, (6*%d1)(%0)	;"
+			"swc1	$f7, (7*%d1)(%0)	;"
+			"swc1	$f8, (8*%d1)(%0)	;"
+			"swc1	$f9, (9*%d1)(%0)	;"
+			"swc1	$f10, (10*%d1)(%0)	;"
+			"swc1	$f11, (11*%d1)(%0)	;"
+			"swc1	$f12, (12*%d1)(%0)	;"
+			"swc1	$f13, (13*%d1)(%0)	;"
+			"swc1	$f14, (14*%d1)(%0)	;"
+			"swc1	$f15, (15*%d1)(%0)	;"
+			"swc1	$f16, (16*%d1)(%0)	;"
+			"swc1	$f17, (17*%d1)(%0)	;"
+			"swc1	$f18, (18*%d1)(%0)	;"
+			"swc1	$f19, (19*%d1)(%0)	;"
+			"swc1	$f20, (20*%d1)(%0)	;"
+			"swc1	$f21, (21*%d1)(%0)	;"
+			"swc1	$f22, (22*%d1)(%0)	;"
+			"swc1	$f23, (23*%d1)(%0)	;"
+			"swc1	$f24, (24*%d1)(%0)	;"
+			"swc1	$f25, (25*%d1)(%0)	;"
+			"swc1	$f26, (26*%d1)(%0)	;"
+			"swc1	$f27, (27*%d1)(%0)	;"
+			"swc1	$f28, (28*%d1)(%0)	;"
+			"swc1	$f29, (29*%d1)(%0)	;"
+			"swc1	$f30, (30*%d1)(%0)	;"
+			"swc1	$f31, (31*%d1)(%0)	;"
+		".set reorder" :: "r"(fp), "i"(4));
+	}
 	/*
 	 * stop CP1, enable interrupts.
 	 */
 	__asm volatile ("mtc0 %0, $" ___STRING(MIPS_COP_0_STATUS)
 	    :: "r"(status));
-#endif
+#endif /* !defined(NOFPU) */
 }
 
 void
-loadfpregs(l)
-	struct lwp *l;
+loadfpregs(struct lwp *l)
 {
 #ifndef NOFPU
+	struct frame * const f = l->l_md.md_regs;
+	mips_fpreg_t * const fp = l->l_addr->u_pcb.pcb_fpregs.r_regs;
 	uint32_t status;
-	mips_fpreg_t *fp;
-	struct frame *f;
+	uint32_t fpcsr;
 
-	if (l == NULL)
-		panic("loading fpregs for NULL proc");
+	/*
+	 * Got turned, maybe due to savefpregs.
+	 */
+	if (fpcurlwp == l) {
+		f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
+		return;
+	} else {
+		savefpregs(fpcurlwp);
+		fpcurlwp = l;
+	}
+
+	/*
+	 * Enable the FP when this lwp return to userspace.
+	 */
+	f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
 
 	/*
 	 * turnoff interrupts enabling CP1 to load FP registers.
 	 */
 	__asm volatile(
-		".set noreorder					\n\t"
-		".set noat					\n\t"
-		"mfc0	%0, $" ___STRING(MIPS_COP_0_STATUS) "	\n\t"
-		"li	$1, %1					\n\t"
-		"mtc0	$1, $" ___STRING(MIPS_COP_0_STATUS) "	\n\t"
+		".set noreorder"				"\n\t"
+		".set noat"					"\n\t"
+		"mfc0	%0, $" ___STRING(MIPS_COP_0_STATUS)	"\n\t"
+		"mtc0	%1, $" ___STRING(MIPS_COP_0_STATUS)	"\n\t"
 		___STRING(COP0_HAZARD_FPUENABLE)
-		".set reorder					\n\t"
-		".set at" : "=r"(status) : "i"(MIPS_SR_COP_1_BIT));
+		".set reorder"					"\n\t"
+		".set at"
+	    : "=r"(status)
+	    : "r"(f->f_regs[_R_SR] & (MIPS_SR_COP_1_BIT|MIPS3_SR_FR)));
 
-	f = l->l_md.md_regs;
-	fp = l->l_addr->u_pcb.pcb_fpregs.r_regs;
 	/*
 	 * load FP registers and establish processes' FP context.
 	 */
-	__asm volatile (
-		".set noreorder		;"
-		LXC1"	$f0, (0*%d1)(%0)	;"
-		LXC1"	$f1, (1*%d1)(%0)	;"
-		LXC1"	$f2, (2*%d1)(%0)	;"
-		LXC1"	$f3, (3*%d1)(%0)	;"
-		LXC1"	$f4, (4*%d1)(%0)	;"
-		LXC1"	$f5, (5*%d1)(%0)	;"
-		LXC1"	$f6, (6*%d1)(%0)	;"
-		LXC1"	$f7, (7*%d1)(%0)	;"
-		LXC1"	$f8, (8*%d1)(%0)	;"
-		LXC1"	$f9, (9*%d1)(%0)	;"
-		LXC1"	$f10, (10*%d1)(%0)	;"
-		LXC1"	$f11, (11*%d1)(%0)	;"
-		LXC1"	$f12, (12*%d1)(%0)	;"
-		LXC1"	$f13, (13*%d1)(%0)	;"
-		LXC1"	$f14, (14*%d1)(%0)	;"
-		LXC1"	$f15, (15*%d1)(%0)	;"
-		LXC1"	$f16, (16*%d1)(%0)	;"
-		LXC1"	$f17, (17*%d1)(%0)	;"
-		LXC1"	$f18, (18*%d1)(%0)	;"
-		LXC1"	$f19, (19*%d1)(%0)	;"
-		LXC1"	$f20, (20*%d1)(%0)	;"
-		LXC1"	$f21, (21*%d1)(%0)	;"
-		LXC1"	$f22, (22*%d1)(%0)	;"
-		LXC1"	$f23, (23*%d1)(%0)	;"
-		LXC1"	$f24, (24*%d1)(%0)	;"
-		LXC1"	$f25, (25*%d1)(%0)	;"
-		LXC1"	$f26, (26*%d1)(%0)	;"
-		LXC1"	$f27, (27*%d1)(%0)	;"
-		LXC1"	$f28, (28*%d1)(%0)	;"
-		LXC1"	$f29, (29*%d1)(%0)	;"
-		LXC1"	$f30, (30*%d1)(%0)	;"
-		LXC1"	$f31, (31*%d1)(%0)	;"
-		".set reorder" :: "r"(fp), "i"(sizeof(fp[0])));
+#if !defined(__mips_o32)
+	if (f->f_regs[_R_SR] & MIPS3_SR_FR) {
+		KASSERT(_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi));
+		__asm volatile (
+			".set noreorder			;"
+			"ldc1	$f0, (0*%d1)(%0)	;"
+			"ldc1	$f1, (1*%d1)(%0)	;"
+			"ldc1	$f2, (2*%d1)(%0)	;"
+			"ldc1	$f3, (3*%d1)(%0)	;"
+			"ldc1	$f4, (4*%d1)(%0)	;"
+			"ldc1	$f5, (5*%d1)(%0)	;"
+			"ldc1	$f6, (6*%d1)(%0)	;"
+			"ldc1	$f7, (7*%d1)(%0)	;"
+			"ldc1	$f8, (8*%d1)(%0)	;"
+			"ldc1	$f9, (9*%d1)(%0)	;"
+			"ldc1	$f10, (10*%d1)(%0)	;"
+			"ldc1	$f11, (11*%d1)(%0)	;"
+			"ldc1	$f12, (12*%d1)(%0)	;"
+			"ldc1	$f13, (13*%d1)(%0)	;"
+			"ldc1	$f14, (14*%d1)(%0)	;"
+			"ldc1	$f15, (15*%d1)(%0)	;"
+			"ldc1	$f16, (16*%d1)(%0)	;"
+			"ldc1	$f17, (17*%d1)(%0)	;"
+			"ldc1	$f18, (18*%d1)(%0)	;"
+			"ldc1	$f19, (19*%d1)(%0)	;"
+			"ldc1	$f20, (20*%d1)(%0)	;"
+			"ldc1	$f21, (21*%d1)(%0)	;"
+			"ldc1	$f22, (22*%d1)(%0)	;"
+			"ldc1	$f23, (23*%d1)(%0)	;"
+			"ldc1	$f24, (24*%d1)(%0)	;"
+			"ldc1	$f25, (25*%d1)(%0)	;"
+			"ldc1	$f26, (26*%d1)(%0)	;"
+			"ldc1	$f27, (27*%d1)(%0)	;"
+			"ldc1	$f28, (28*%d1)(%0)	;"
+			"ldc1	$f29, (29*%d1)(%0)	;"
+			"ldc1	$f30, (30*%d1)(%0)	;"
+			"ldc1	$f31, (31*%d1)(%0)	;"
+			".set reorder" :: "r"(fp), "i"(sizeof(fp[0])));
+		fpcsr = fp[32];
+	} else
+#endif
+	{
+		KASSERT(!_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi));
+		__asm volatile (
+			".set noreorder			;"
+			"lwc1	$f0, (0*%d1)(%0)	;"
+			"lwc1	$f1, (1*%d1)(%0)	;"
+			"lwc1	$f2, (2*%d1)(%0)	;"
+			"lwc1	$f3, (3*%d1)(%0)	;"
+			"lwc1	$f4, (4*%d1)(%0)	;"
+			"lwc1	$f5, (5*%d1)(%0)	;"
+			"lwc1	$f6, (6*%d1)(%0)	;"
+			"lwc1	$f7, (7*%d1)(%0)	;"
+			"lwc1	$f8, (8*%d1)(%0)	;"
+			"lwc1	$f9, (9*%d1)(%0)	;"
+			"lwc1	$f10, (10*%d1)(%0)	;"
+			"lwc1	$f11, (11*%d1)(%0)	;"
+			"lwc1	$f12, (12*%d1)(%0)	;"
+			"lwc1	$f13, (13*%d1)(%0)	;"
+			"lwc1	$f14, (14*%d1)(%0)	;"
+			"lwc1	$f15, (15*%d1)(%0)	;"
+			"lwc1	$f16, (16*%d1)(%0)	;"
+			"lwc1	$f17, (17*%d1)(%0)	;"
+			"lwc1	$f18, (18*%d1)(%0)	;"
+			"lwc1	$f19, (19*%d1)(%0)	;"
+			"lwc1	$f20, (20*%d1)(%0)	;"
+			"lwc1	$f21, (21*%d1)(%0)	;"
+			"lwc1	$f22, (22*%d1)(%0)	;"
+			"lwc1	$f23, (23*%d1)(%0)	;"
+			"lwc1	$f24, (24*%d1)(%0)	;"
+			"lwc1	$f25, (25*%d1)(%0)	;"
+			"lwc1	$f26, (26*%d1)(%0)	;"
+			"lwc1	$f27, (27*%d1)(%0)	;"
+			"lwc1	$f28, (28*%d1)(%0)	;"
+			"lwc1	$f29, (29*%d1)(%0)	;"
+			"lwc1	$f30, (30*%d1)(%0)	;"
+			"lwc1	$f31, (31*%d1)(%0)	;"
+			".set reorder"
+		    :
+		    : "r"(fp), "i"(4));
+		fpcsr = ((int *)fp)[32];
+	}
 
 	/*
 	 * load FPCSR and stop CP1 again while enabling interrupts.
 	 */
 	__asm volatile(
-		".set noreorder					\n\t"
-		".set noat					\n\t"
-		"ctc1	%0, $31					\n\t"
-		"mtc0	%1, $" ___STRING(MIPS_COP_0_STATUS) "	\n\t"
-		".set reorder					\n\t"
+		".set noreorder"				"\n\t"
+		".set noat"					"\n\t"
+		"ctc1	%0, $31"				"\n\t"
+		"mtc0	%1, $" ___STRING(MIPS_COP_0_STATUS)	"\n\t"
+		".set reorder"					"\n\t"
 		".set at"
-		:: "r"(fp[32] &~ MIPS_FPU_EXCEPTION_BITS), "r"(status));
-#endif
+		:: "r"(fpcsr &~ MIPS_FPU_EXCEPTION_BITS), "r"(status));
+#endif /* !defined(NOFPU) */
 }
 
 /* 
@@ -1801,8 +1913,7 @@
 	if (flags & _UC_FPU) {
 		/* Disable the FPU to fault in FP registers. */
 		f->f_regs[_R_SR] &= ~MIPS_SR_COP_1_BIT;
-		if (l == fpcurlwp)
-			fpcurlwp = NULL;
+		fpcurlwp = &lwp0;
 
 		/*
 		 * The PCB FP regs struct includes the FP CSR, so use the

Index: src/sys/arch/mips/mips/process_machdep.c
diff -u src/sys/arch/mips/mips/process_machdep.c:1.29.62.1 src/sys/arch/mips/mips/process_machdep.c:1.29.62.2
--- src/sys/arch/mips/mips/process_machdep.c:1.29.62.1	Fri Aug 21 17:40:22 2009
+++ src/sys/arch/mips/mips/process_machdep.c	Sun Aug 23 03:38:19 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: process_machdep.c,v 1.29.62.1 2009/08/21 17:40:22 matt Exp $	*/
+/*	$NetBSD: process_machdep.c,v 1.29.62.2 2009/08/23 03:38:19 matt Exp $	*/
 
 /*
  * Copyright (c) 1993 The Regents of the University of California.
@@ -76,7 +76,7 @@
  */
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.29.62.1 2009/08/21 17:40:22 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: process_machdep.c,v 1.29.62.2 2009/08/23 03:38:19 matt Exp $");
 
 /*
  * This file may seem a bit stylized, but that so that it's easier to port.
@@ -130,24 +130,40 @@
 	return 0;
 }
 
+#if defined(__mips_n32) || defined(__mips_n64)
+CTASSERT(sizeof(struct fpreg_oabi) <= sizeof(struct fpreg));
+#endif
+
 int
-process_read_fpregs(struct lwp *l, struct fpreg *regs)
+process_read_xfpregs(struct lwp *l, struct fpreg *regs, size_t *regslen_p)
 {
+	KASSERT(*regslen_p == sizeof(struct fpreg));
+
+#if defined(__mips_n32) || defined(__mips_n64)
+	if (!_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi))
+		*regslen_p = sizeof(struct fpreg_oabi);
+#endif
 
 	if ((l->l_md.md_flags & MDP_FPUSED) && l == fpcurlwp)
 		savefpregs(l);
-	memcpy(regs, &l->l_addr->u_pcb.pcb_fpregs, sizeof(struct fpreg));
+	memcpy(regs, &l->l_addr->u_pcb.pcb_fpregs, *regslen_p);
 	return 0;
 }
 
 int
-process_write_fpregs(struct lwp *l, const struct fpreg *regs)
+process_write_xfpregs(struct lwp *l, const struct fpreg *regs, size_t regslen)
 {
+	KASSERT(regslen <= sizeof(struct fpreg));
 
 	/* to load FPA contents next time when FP insn is executed */
 	if ((l->l_md.md_flags & MDP_FPUSED) && l == fpcurlwp)
-		fpcurlwp = NULL;
-	memcpy(&l->l_addr->u_pcb.pcb_fpregs, regs, sizeof(struct fpreg));
+		fpcurlwp = &lwp0;
+#if defined(__mips_n32) || defined(__mips_n64)
+	KASSERT((_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi) ? sizeof(struct fpreg) : sizeof(struct fpreg_oabi)) == regslen);
+#else
+	KASSERT(regslen == sizeof(struct fpreg));
+#endif
+	memcpy(&l->l_addr->u_pcb.pcb_fpregs, regs, regslen);
 	return 0;
 }
 

Index: src/sys/kern/core_elf32.c
diff -u src/sys/kern/core_elf32.c:1.32.16.1 src/sys/kern/core_elf32.c:1.32.16.2
--- src/sys/kern/core_elf32.c:1.32.16.1	Fri Aug 21 18:00:36 2009
+++ src/sys/kern/core_elf32.c	Sun Aug 23 03:38:19 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: core_elf32.c,v 1.32.16.1 2009/08/21 18:00:36 matt Exp $	*/
+/*	$NetBSD: core_elf32.c,v 1.32.16.2 2009/08/23 03:38:19 matt Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: core_elf32.c,v 1.32.16.1 2009/08/21 18:00:36 matt Exp $");
+__KERNEL_RCSID(1, "$NetBSD: core_elf32.c,v 1.32.16.2 2009/08/23 03:38:19 matt Exp $");
 
 /* If not included by core_elf64.c, ELFSIZE won't be defined. */
 #ifndef ELFSIZE
@@ -85,7 +85,11 @@
 #define	elfround(x)	roundup((x), ELFROUNDSIZE)
 
 #define elf_process_read_regs	CONCAT(process_read_regs, ELFSIZE)
+#ifdef __HAVE_PROCESS_XFPREGS
+#define elf_process_read_xfpregs CONCAT(process_read_xfpregs, ELFSIZE)
+#else
 #define elf_process_read_fpregs	CONCAT(process_read_fpregs, ELFSIZE)
+#endif
 #define elf_reg			CONCAT(process_reg, ELFSIZE)
 #define elf_fpreg		CONCAT(process_fpreg, ELFSIZE)
 
@@ -452,14 +456,19 @@
 #ifdef PT_GETFPREGS
 	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(freg));
 	if (iocookie) {
+		size_t freglen = sizeof(freg);
 		uvm_lwp_hold(l);
+#ifdef __HAVE_PROCESS_XFPREGS
+		error = elf_process_read_xfpregs(l, &freg, &freglen);
+#else
 		error = elf_process_read_fpregs(l, &freg);
+#endif
 		uvm_lwp_rele(l);
 		if (error)
 			return (error);
 
 		nhdr.n_namesz = namesize;
-		nhdr.n_descsz = sizeof(freg);
+		nhdr.n_descsz = freglen;
 		nhdr.n_type = PT_GETFPREGS;
 
 		error = ELFNAMEEND(coredump_writenote)(p, iocookie, &nhdr,

Index: src/sys/kern/sys_process.c
diff -u src/sys/kern/sys_process.c:1.143.4.1 src/sys/kern/sys_process.c:1.143.4.1.4.1
--- src/sys/kern/sys_process.c:1.143.4.1	Fri Feb  6 01:54:09 2009
+++ src/sys/kern/sys_process.c	Sun Aug 23 03:38:19 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_process.c,v 1.143.4.1 2009/02/06 01:54:09 snj Exp $	*/
+/*	$NetBSD: sys_process.c,v 1.143.4.1.4.1 2009/08/23 03:38:19 matt Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
@@ -118,7 +118,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_process.c,v 1.143.4.1 2009/02/06 01:54:09 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_process.c,v 1.143.4.1.4.1 2009/08/23 03:38:19 matt Exp $");
 
 #include "opt_coredump.h"
 #include "opt_ptrace.h"
@@ -838,7 +838,7 @@
 	int error;
 	struct fpreg r;
 	char *kv;
-	int kl;
+	size_t kl;
 
 	if (uio->uio_offset < 0 || uio->uio_offset > (off_t)sizeof(r))
 		return EINVAL;
@@ -853,14 +853,22 @@
 
 	uvm_lwp_hold(l);
 
+#ifdef __HAVE_PROCESS_XFPREGS
+	error = process_read_xfpregs(l, &r, &kl);
+#else
 	error = process_read_fpregs(l, &r);
+#endif
 	if (error == 0)
 		error = uiomove(kv, kl, uio);
 	if (error == 0 && uio->uio_rw == UIO_WRITE) {
 		if (l->l_stat != LSSTOP)
 			error = EBUSY;
 		else
+#ifdef __HAVE_PROCESS_XFPREGS
+			error = process_write_xfpregs(l, &r, kl);
+#else
 			error = process_write_fpregs(l, &r);
+#endif
 	}
 
 	uvm_lwp_rele(l);

Index: src/sys/sys/ptrace.h
diff -u src/sys/sys/ptrace.h:1.40 src/sys/sys/ptrace.h:1.40.28.1
--- src/sys/sys/ptrace.h:1.40	Sat Jan  5 12:41:43 2008
+++ src/sys/sys/ptrace.h	Sun Aug 23 03:38:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: ptrace.h,v 1.40 2008/01/05 12:41:43 dsl Exp $	*/
+/*	$NetBSD: ptrace.h,v 1.40.28.1 2009/08/23 03:38:18 matt Exp $	*/
 
 /*-
  * Copyright (c) 1984, 1993
@@ -111,6 +111,15 @@
 
 void	proc_reparent(struct proc *, struct proc *);
 #ifdef PT_GETFPREGS
+#ifdef __HAVE_PROCESS_XFPREGS
+int	process_read_xfpregs(struct lwp *, struct fpreg *, size_t *);
+#ifndef process_read_xfpregs32
+#define process_read_xfpregs32	process_read_xfpregs
+#endif
+#ifndef process_read_xfpregs64
+#define process_read_xfpregs64	process_read_xfpregs
+#endif
+#else
 int	process_read_fpregs(struct lwp *, struct fpreg *);
 #ifndef process_read_fpregs32
 #define process_read_fpregs32	process_read_fpregs
@@ -119,6 +128,7 @@
 #define process_read_fpregs64	process_read_fpregs
 #endif
 #endif
+#endif
 #ifdef PT_GETREGS
 int	process_read_regs(struct lwp *, struct reg *);
 #ifndef process_read_regs32
@@ -131,8 +141,12 @@
 int	process_set_pc(struct lwp *, void *);
 int	process_sstep(struct lwp *, int);
 #ifdef PT_SETFPREGS
+#ifdef __HAVE_PROCESS_XFPREGS
+int	process_write_xfpregs(struct lwp *, const struct fpreg *, size_t);
+#else
 int	process_write_fpregs(struct lwp *, const struct fpreg *);
 #endif
+#endif
 #ifdef PT_SETREGS
 int	process_write_regs(struct lwp *, const struct reg *);
 #endif

Reply via email to