Module Name:    src
Committed By:   matt
Date:           Mon Jun 13 21:19:02 UTC 2011

Modified Files:
        src/sys/arch/powerpc/include: cpu.h types.h
        src/sys/arch/powerpc/powerpc: vm_machdep.c

Log Message:
Add __HAVE_CPU_UAREA_ROUTINES support so that uareas will be direct-mapped.
(This avoids the nasty tlb recursion problem on ibm4xx as well as on mpc85xx).


To generate a diff of this commit:
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/powerpc/include/cpu.h
cvs rdiff -u -r1.42 -r1.43 src/sys/arch/powerpc/include/types.h
cvs rdiff -u -r1.85 -r1.86 src/sys/arch/powerpc/powerpc/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/powerpc/include/cpu.h
diff -u src/sys/arch/powerpc/include/cpu.h:1.78 src/sys/arch/powerpc/include/cpu.h:1.79
--- src/sys/arch/powerpc/include/cpu.h:1.78	Sun Jun 12 16:27:14 2011
+++ src/sys/arch/powerpc/include/cpu.h	Mon Jun 13 21:19:01 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.78 2011/06/12 16:27:14 matt Exp $	*/
+/*	$NetBSD: cpu.h,v 1.79 2011/06/13 21:19:01 matt Exp $	*/
 
 /*
  * Copyright (C) 1999 Wolfgang Solfrank.
@@ -357,9 +357,8 @@
 extern int cpu_printfataltraps;
 extern char cpu_model[];
 
-void cpu_uarea_remap(struct lwp *);
-struct cpu_info *cpu_attach_common(struct device *, int);
-void cpu_setup(struct device *, struct cpu_info *);
+struct cpu_info *cpu_attach_common(device_t, int);
+void cpu_setup(device_t, struct cpu_info *);
 void cpu_identify(char *, size_t);
 int cpu_get_dfs(void);
 void cpu_set_dfs(int);
@@ -397,6 +396,8 @@
 
 #define	DELAY(n)		delay(n)
 
+void *	cpu_uarea_alloc(bool);
+bool	cpu_uarea_free(void *);
 void	cpu_need_resched(struct cpu_info *, int);
 void	cpu_signotify(struct lwp *);
 void	cpu_need_proftick(struct lwp *);

Index: src/sys/arch/powerpc/include/types.h
diff -u src/sys/arch/powerpc/include/types.h:1.42 src/sys/arch/powerpc/include/types.h:1.43
--- src/sys/arch/powerpc/include/types.h:1.42	Sun Jun 12 06:10:41 2011
+++ src/sys/arch/powerpc/include/types.h	Mon Jun 13 21:19:02 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.42 2011/06/12 06:10:41 matt Exp $	*/
+/*	$NetBSD: types.h,v 1.43 2011/06/13 21:19:02 matt Exp $	*/
 
 /*-
  * Copyright (C) 1995 Wolfgang Solfrank.
@@ -75,6 +75,7 @@
 #define __HAVE_CPU_COUNTER
 #define __HAVE_SYSCALL_INTERN
 #define	__HAVE_CPU_DATA_FIRST
+#define	__HAVE_CPU_UAREA_ROUTINES
 #ifdef _LP64
 #define	__HAVE_ATOMIC64_OPS
 #endif

Index: src/sys/arch/powerpc/powerpc/vm_machdep.c
diff -u src/sys/arch/powerpc/powerpc/vm_machdep.c:1.85 src/sys/arch/powerpc/powerpc/vm_machdep.c:1.86
--- src/sys/arch/powerpc/powerpc/vm_machdep.c:1.85	Mon Jun  6 22:04:34 2011
+++ src/sys/arch/powerpc/powerpc/vm_machdep.c	Mon Jun 13 21:19:02 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $	*/
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.85 2011/06/06 22:04:34 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.86 2011/06/13 21:19:02 matt Exp $");
 
 #include "opt_altivec.h"
 #include "opt_multiprocessor.h"
@@ -281,3 +281,61 @@
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = 0;
 }
+
+#ifdef __HAVE_CPU_UAREA_ROUTINES
+void *
+cpu_uarea_alloc(bool system)
+{
+	struct pglist pglist;
+	int error;
+
+	/*
+	 * Allocate a new physically contiguous uarea which can be
+	 * direct-mapped.
+	 */
+	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
+	if (error) {
+#ifdef _LP64
+		if (!system)
+			return NULL;
+#endif
+		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
+	}
+
+	/*
+	 * Get the physical address from the first page.
+	 */
+	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
+	KASSERT(pg != NULL);
+	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+	/*
+	 * We need to return a direct-mapped VA for the pa.  But since
+	 * we map va=pa (1:1), that's easy.
+	 */
+
+	return (void *)(uintptr_t) pa;
+}
+
+/*
+ * Return true if we freed it, false if we didn't.
+ */
+bool
+cpu_uarea_free(void *vva)
+{
+	vaddr_t va = (vaddr_t) vva;
+	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
+		return false;
+
+	/*
+	 * Since the pages are physically contiguous, the vm_page structures
+	 * will be as well.
+	 */
+	struct vm_page *pg = PHYS_TO_VM_PAGE((paddr_t)va);
+	KASSERT(pg != NULL);
+	for (size_t i = 0; i < UPAGES; i++, pg++) {
+		uvm_pagefree(pg);
+	}
+	return true;
+}
+#endif /* __HAVE_CPU_UAREA_ROUTINES */

Reply via email to