Module Name: src Committed By: tsutsui Date: Sat Dec 5 23:16:58 UTC 2009
Modified Files: src/sys/arch/cesfic/cesfic: pmap_bootstrap.c src/sys/arch/hp300/hp300: pmap_bootstrap.c src/sys/arch/luna68k/luna68k: pmap_bootstrap.c src/sys/arch/mac68k/mac68k: pmap_bootstrap.c src/sys/arch/mvme68k/mvme68k: pmap_bootstrap.c src/sys/arch/news68k/news68k: pmap_bootstrap.c src/sys/arch/next68k/next68k: pmap_bootstrap.c src/sys/arch/x68k/x68k: pmap_bootstrap.c Log Message: Use proper macro, variable names, types, and assignments for readability. To generate a diff of this commit: cvs rdiff -u -r1.22 -r1.23 src/sys/arch/cesfic/cesfic/pmap_bootstrap.c cvs rdiff -u -r1.42 -r1.43 src/sys/arch/hp300/hp300/pmap_bootstrap.c cvs rdiff -u -r1.22 -r1.23 src/sys/arch/luna68k/luna68k/pmap_bootstrap.c cvs rdiff -u -r1.83 -r1.84 src/sys/arch/mac68k/mac68k/pmap_bootstrap.c cvs rdiff -u -r1.36 -r1.37 src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c cvs rdiff -u -r1.26 -r1.27 src/sys/arch/news68k/news68k/pmap_bootstrap.c cvs rdiff -u -r1.31 -r1.32 src/sys/arch/next68k/next68k/pmap_bootstrap.c cvs rdiff -u -r1.45 -r1.46 src/sys/arch/x68k/x68k/pmap_bootstrap.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/cesfic/cesfic/pmap_bootstrap.c diff -u src/sys/arch/cesfic/cesfic/pmap_bootstrap.c:1.22 src/sys/arch/cesfic/cesfic/pmap_bootstrap.c:1.23 --- src/sys/arch/cesfic/cesfic/pmap_bootstrap.c:1.22 Sat Dec 5 15:31:07 2009 +++ src/sys/arch/cesfic/cesfic/pmap_bootstrap.c Sat Dec 5 23:16:57 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.22 2009/12/05 15:31:07 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.23 2009/12/05 23:16:57 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.22 2009/12/05 15:31:07 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.23 2009/12/05 23:16:57 tsutsui Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -85,7 +85,7 @@ { paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa; u_int nptpages, kstsize; - st_entry_t protoste, *ste; + st_entry_t protoste, *ste, *este; pt_entry_t protopte, *pte, *epte; /* @@ -165,10 +165,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately @@ -179,24 +179,25 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. 
*/ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } @@ -204,31 +205,34 @@ * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map kptmpa and the "last PT page". */ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE * 2]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = lkptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -239,7 +243,8 @@ /* * Invalidate all but the last remaining entry. 
*/ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *pte++ = PG_NV; } @@ -255,8 +260,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -269,7 +274,8 @@ /* * Invalidate all but the last remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -286,11 +292,14 @@ *pte = lkptpa | PG_RW | PG_CI | PG_V; } /* - * Invalidate all but the final entry in the last kernel PT page - * (u-area PTEs will be validated later). The final entry maps - * the last page of physical memory. + * Invalidate all but the final entry in the last kernel PT page. + * The final entry maps the last page of physical memory to + * prepare a page that is PA == VA to turn on the MMU. + * + * XXX: This looks copied from hp300 where PA != VA, but + * XXX: it's suspicious if this is also required on this port. */ - pte = (u_int *)lkptpa; + pte = (pt_entry_t *)lkptpa; epte = &pte[NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -299,7 +308,7 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -307,7 +316,8 @@ /* * Validate PTEs for kernel text (RO). 
*/ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE)]; epte = &((u_int *)kptpa)[m68k_btop(m68k_trunc_page(&etext))]; protopte = firstpa | PG_RO | PG_V; while (pte < epte) { @@ -319,7 +329,8 @@ * by us so far (nextpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). */ - epte = &((u_int *)kptpa)[m68k_btop(KERNBASE + nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(KERNBASE + nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages Index: src/sys/arch/hp300/hp300/pmap_bootstrap.c diff -u src/sys/arch/hp300/hp300/pmap_bootstrap.c:1.42 src/sys/arch/hp300/hp300/pmap_bootstrap.c:1.43 --- src/sys/arch/hp300/hp300/pmap_bootstrap.c:1.42 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/hp300/hp300/pmap_bootstrap.c Sat Dec 5 23:16:57 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.42 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.43 2009/12/05 23:16:57 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.42 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.43 2009/12/05 23:16:57 tsutsui Exp $"); #include <sys/param.h> @@ -95,7 +95,7 @@ { paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa; u_int nptpages, kstsize; - st_entry_t protoste, *ste; + st_entry_t protoste, *ste, *este; pt_entry_t protopte, *pte, *epte; /* @@ -173,10 +173,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). 
We need: @@ -186,53 +186,57 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. */ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map kptmpa and the "last PT page". 
*/ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE * 2]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = lkptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -242,7 +246,8 @@ /* * Invalidate all but the last remaining entry. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *pte++ = PG_NV; } @@ -258,8 +263,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -272,7 +277,8 @@ /* * Invalidate all but the last remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -289,12 +295,12 @@ *pte = lkptpa | PG_RW | PG_CI | PG_V; } /* - * Invalidate all but the final entry in the last kernel PT page - * (u-area PTEs will be validated later). The final entry maps - * the last page of physical memory. + * Invalidate all but the final entry in the last kernel PT page. 
+ * The final entry maps the last page of physical memory to + * prepare a page that is PA == VA to turn on the MMU. */ - pte = (u_int *)lkptpa; - epte = &pte[NPTEPG-1]; + pte = (pt_entry_t *)lkptpa; + epte = &pte[NPTEPG - 1]; while (pte < epte) *pte++ = PG_NV; *pte = MAXADDR | PG_RW | PG_CI | PG_V; @@ -302,7 +308,7 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -320,7 +326,8 @@ * Validate PTEs for kernel text (RO). The first page * of kernel text remains invalid; see locore.s */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V; while (pte < epte) { @@ -332,7 +339,8 @@ * by us so far (nextpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). 
*/ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -355,13 +363,13 @@ protopte = INTIOBASE | PG_RW | PG_CI | PG_V; epte = &pte[IIOMAPSIZE]; - RELOC(intiobase, char *) = (char *)PTE2VA(pte); - RELOC(intiolimit, char *) = (char *)PTE2VA(epte); + RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte); + RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte); while (pte < epte) { *pte++ = protopte; protopte += PAGE_SIZE; } - RELOC(extiobase, char *) = (char *)PTE2VA(pte); + RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte); pte += EIOMAPSIZE; RELOC(virtual_avail, vaddr_t) = PTE2VA(pte); @@ -371,13 +379,11 @@ /* * Sysseg: base of kernel segment table */ - RELOC(Sysseg, st_entry_t *) = - (st_entry_t *)(kstpa - firstpa); + RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa); /* * Sysptmap: base of kernel page table map */ - RELOC(Sysptmap, pt_entry_t *) = - (pt_entry_t *)(kptmpa - firstpa); + RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa); /* * Sysmap: kernel page table (as mapped through Sysptmap) * Allocated at the end of KVA space. 
@@ -474,8 +480,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/luna68k/luna68k/pmap_bootstrap.c diff -u src/sys/arch/luna68k/luna68k/pmap_bootstrap.c:1.22 src/sys/arch/luna68k/luna68k/pmap_bootstrap.c:1.23 --- src/sys/arch/luna68k/luna68k/pmap_bootstrap.c:1.22 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/luna68k/luna68k/pmap_bootstrap.c Sat Dec 5 23:16:57 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.22 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.23 2009/12/05 23:16:57 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.22 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.23 2009/12/05 23:16:57 tsutsui Exp $"); #include <sys/param.h> @@ -86,6 +86,9 @@ paddr_t kstpa, kptpa, kptmpa, lwp0upa; u_int nptpages, kstsize; st_entry_t protoste, *ste; +#if defined(M68040) + st_entry_t *este; +#endif pt_entry_t protopte, *pte, *epte; u_int iiomapsize; @@ -154,10 +157,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). We need: @@ -167,47 +170,50 @@ * now to save the HW the expense of doing it. 
*/ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. */ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the level 1 descriptor correspond to * SYSMAP_VA to map the last block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SYSMAP_VA >> SG4_SHIFT1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SYSMAP_VA >> SG4_SHIFT1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the portion of that block of * descriptors to map Sysptmap. 
*/ - pte = &pte[((SYSMAP_VA & SG4_MASK2) >> SG4_SHIFT2)]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = &este[((SYSMAP_VA & SG4_MASK2) >> SG4_SHIFT2)]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -217,7 +223,8 @@ /* * Invalidate all remaining entries. */ - epte = &((u_int *)kptmpa)[NPTEPG]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG]; while (pte < epte) { *pte++ = PG_NV; } @@ -225,8 +232,8 @@ * Initialize the one corresponding SYSMAP_VA * to point to Sysptmap. */ - pte = (u_int *)kptmpa; - pte[SYSMAP_VA/(NPTEPG*PAGE_SIZE)] = + pte = (pt_entry_t *)kptmpa; + pte[SYSMAP_VA / (NPTEPG * PAGE_SIZE)] = kptmpa | PG_RW | PG_CI | PG_V; } else #endif @@ -235,8 +242,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -249,7 +256,8 @@ /* * Invalidate all remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -258,11 +266,11 @@ * Initialize the one corresponding to SYSMAP_VA * to point to Sysptmap. 
*/ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; - pte[SYSMAP_VA/(NPTEPG*PAGE_SIZE)] = + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; + pte[SYSMAP_VA / (NPTEPG * PAGE_SIZE)] = kptmpa | SG_RW | SG_V; - ste[SYSMAP_VA/(NPTEPG*PAGE_SIZE)] = + ste[SYSMAP_VA / (NPTEPG * PAGE_SIZE)] = kptmpa | PG_RW | PG_CI | PG_V; } @@ -270,14 +278,15 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; /* * Validate PTEs for kernel text (RO) */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = firstpa | PG_RO | PG_V; while (pte < epte) { @@ -289,7 +298,8 @@ * by us so far (nextpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). */ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -393,8 +403,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/mac68k/mac68k/pmap_bootstrap.c diff -u src/sys/arch/mac68k/mac68k/pmap_bootstrap.c:1.83 src/sys/arch/mac68k/mac68k/pmap_bootstrap.c:1.84 --- src/sys/arch/mac68k/mac68k/pmap_bootstrap.c:1.83 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/mac68k/mac68k/pmap_bootstrap.c Sat Dec 5 23:16:57 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.83 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.84 2009/12/05 23:16:57 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ 
#include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.83 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.84 2009/12/05 23:16:57 tsutsui Exp $"); #include "opt_ddb.h" #include "opt_kgdb.h" @@ -116,7 +116,7 @@ int avail_remaining; int avail_range; int i; - st_entry_t protoste, *ste; + st_entry_t protoste, *ste, *este; pt_entry_t protopte, *pte, *epte; extern char start[]; @@ -202,10 +202,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = PA2VA(kstpa, u_int *); - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = PA2VA(kstpa, st_entry_t *); + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). We need: @@ -215,48 +215,51 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &(PA2VA(kstpa, u_int *))[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = PA2VA(kstpa, st_entry_t *); + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. 
*/ - pte = PA2VA(kstpa, u_int *); - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = PA2VA(kstpa, st_entry_t *); + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &(PA2VA(kstpa, u_int*))[SG4_LEV1SIZE-1]; - pte = &(PA2VA(kstpa, u_int*))[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = PA2VA(kstpa, st_entry_t *); + ste = &ste[SG4_LEV1SIZE - 1]; + este = PA2VA(kstpa, st_entry_t *); + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map Sysmap. */ - pte = &(PA2VA(kstpa, u_int*)) [kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = PA2VA(kstpa, st_entry_t *); + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = PA2VA(kptmpa, u_int *); + pte = PA2VA(kptmpa, pt_entry_t *); epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -266,7 +269,8 @@ /* * Invalidate all but the last remaining entry. */ - epte = &(PA2VA(kptmpa, u_int *))[NPTEPG - 1]; + epte = PA2VA(kptmpa, pt_entry_t *); + epte = &epte[NPTEPG - 1]; while (pte < epte) { *pte++ = PG_NV; } @@ -279,8 +283,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. 
*/ - ste = PA2VA(kstpa, u_int*); - pte = PA2VA(kptmpa, u_int*); + ste = PA2VA(kstpa, st_entry_t *); + pte = PA2VA(kptmpa, pt_entry_t *); epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -293,7 +297,8 @@ /* * Invalidate all but the last remaining entries in both. */ - epte = &(PA2VA(kptmpa, u_int *))[NPTEPG - 1]; + epte = PA2VA(kptmpa, pt_entry_t *); + epte = &epte[NPTEPG - 1]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -309,7 +314,7 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = PA2VA(kptpa, u_int *); + pte = PA2VA(kptpa, pt_entry_t *); epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -318,7 +323,8 @@ * Validate PTEs for kernel text (RO). * Pages up to "start" must be writable for the ROM. */ - pte = &(PA2VA(kptpa, u_int *))[m68k_btop(KERNBASE)]; + pte = PA2VA(kptpa, pt_entry_t *); + pte = &pte[m68k_btop(KERNBASE)]; /* XXX why KERNBASE relative? */ epte = &pte[m68k_btop(m68k_round_page(start))]; protopte = firstpa | PG_RW | PG_V; @@ -338,7 +344,8 @@ * by us so far (nextpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). 
*/ - epte = &(PA2VA(kptpa, u_int *))[m68k_btop(nextpa - firstpa)]; + epte = PA2VA(kptpa, pt_entry_t *); + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -478,8 +485,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c diff -u src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c:1.36 src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c:1.37 --- src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c:1.36 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c Sat Dec 5 23:16:57 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.36 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.37 2009/12/05 23:16:57 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.36 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.37 2009/12/05 23:16:57 tsutsui Exp $"); #include <sys/param.h> #include <sys/kcore.h> @@ -91,6 +91,9 @@ paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa; u_int nptpages, kstsize; st_entry_t protoste, *ste; +#if defined(M68040) || defined(M68060) + st_entry_t *este; +#endif pt_entry_t protopte, *pte, *epte; psize_t size; u_int iiomappages; @@ -185,10 +188,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). 
We need: @@ -198,11 +201,12 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* @@ -210,41 +214,44 @@ * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE * level 1 descriptors to map the `num' level 2's. */ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map kptmpa and the "last PT page". 
*/ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE * 2]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = lkptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_U | PG_V; while (pte < epte) { @@ -254,7 +261,8 @@ /* * Invalidate all but the last remaining entry. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *pte++ = PG_NV; } @@ -268,8 +276,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -282,7 +290,8 @@ /* * Invalidate all but the last two remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; } @@ -299,11 +308,14 @@ *pte = lkptpa | PG_RW | PG_CI | PG_V; } /* - * Invalidate all but the final entry in the last kernel PT page - * (u-area PTEs will be validated later). The final entry maps - * the last page of physical memory. + * Invalidate all but the final entry in the last kernel PT page. 
+ * The final entry maps the last page of physical memory to + * prepare a page that is PA == VA to turn on the MMU. + * + * XXX: This looks copied from hp300 where PA != VA, but + * XXX: it's suspicious if this is also required on this port. */ - pte = (u_int *)lkptpa; + pte = (pt_entry_t *)lkptpa; epte = &pte[NPTEPG - 1]; while (pte < epte) *pte++ = PG_NV; @@ -312,14 +324,15 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; /* * Validate PTEs for kernel text (RO) */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = firstpa | PG_RO | PG_U | PG_V; while (pte < epte) { @@ -331,7 +344,8 @@ * by us so far (kstpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). */ - epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(kstpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -347,7 +361,8 @@ * these machines (for the 68040 not strictly necessary, but * recommended by Motorola; for the 68060 mandatory) */ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; if (RELOC(mmutype, int) == MMU_68040) { protopte &= ~PG_CMASK; @@ -521,8 +536,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/news68k/news68k/pmap_bootstrap.c diff -u src/sys/arch/news68k/news68k/pmap_bootstrap.c:1.26 
src/sys/arch/news68k/news68k/pmap_bootstrap.c:1.27 --- src/sys/arch/news68k/news68k/pmap_bootstrap.c:1.26 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/news68k/news68k/pmap_bootstrap.c Sat Dec 5 23:16:58 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.26 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.27 2009/12/05 23:16:58 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -39,7 +39,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.26 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.27 2009/12/05 23:16:58 tsutsui Exp $"); #include <sys/param.h> @@ -92,6 +92,9 @@ paddr_t kstpa, kptpa, kptmpa, lwp0upa; u_int nptpages, kstsize; st_entry_t protoste, *ste; +#ifdef M68040 + st_entry_t *este; +#endif pt_entry_t protopte, *pte, *epte; u_int iiomapsize, eiomapsize; @@ -181,10 +184,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). We need: @@ -194,47 +197,51 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. 
*/ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map Sysmap. */ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -244,7 +251,8 @@ /* * Invalidate all but the last remaining entry. */ - epte = &((u_int *)kptmpa)[NPTEPG-1]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 1]; while (pte < epte) { *pte++ = PG_NV; } @@ -259,8 +267,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. 
*/ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -273,7 +281,8 @@ /* * Invalidate all but the last remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-1]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 1]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -289,7 +298,7 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -297,7 +306,8 @@ /* * Validate PTEs for kernel text (RO). */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = firstpa | PG_RO | PG_V; while (pte < epte) { @@ -309,7 +319,8 @@ * by us so far (nextpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). 
*/ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -330,13 +341,13 @@ protopte = RELOC(intiobase_phys, u_int) | PG_RW | PG_CI | PG_V; epte = &pte[iiomapsize]; - RELOC(intiobase, char *) = (char *)PTE2VA(pte); - RELOC(intiolimit, char *) = (char *)PTE2VA(epte); + RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte); + RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte); while (pte < epte) { *pte++ = protopte; protopte += PAGE_SIZE; } - RELOC(extiobase, char *) = (char *)PTE2VA(pte); + RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte); pte += eiomapsize; RELOC(virtual_avail, vaddr_t) = PTE2VA(pte); @@ -346,13 +357,11 @@ /* * Sysseg: base of kernel segment table */ - RELOC(Sysseg, st_entry_t *) = - (st_entry_t *)(kstpa - firstpa); + RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa); /* * Sysptmap: base of kernel page table map */ - RELOC(Sysptmap, pt_entry_t *) = - (pt_entry_t *)(kptmpa - firstpa); + RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa); /* * Sysmap: kernel page table (as mapped through Sysptmap) * Allocated at the end of KVA space. 
@@ -392,10 +401,10 @@ #endif #ifdef news1700 if (RELOC(systype, int) == NEWS1700) { - RELOC(cache_ctl, char *) = 0xe1300000 - INTIOBASE1700 + - RELOC(intiobase, char *); - RELOC(cache_clr, char *) = 0xe1900000 - INTIOBASE1700 + - RELOC(intiobase, char *); + RELOC(cache_ctl, uint8_t *) = 0xe1300000 - INTIOBASE1700 + + RELOC(intiobase, uint8_t *); + RELOC(cache_clr, uint8_t *) = 0xe1900000 - INTIOBASE1700 + + RELOC(intiobase, uint8_t *); } #endif @@ -444,8 +453,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/next68k/next68k/pmap_bootstrap.c diff -u src/sys/arch/next68k/next68k/pmap_bootstrap.c:1.31 src/sys/arch/next68k/next68k/pmap_bootstrap.c:1.32 --- src/sys/arch/next68k/next68k/pmap_bootstrap.c:1.31 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/next68k/next68k/pmap_bootstrap.c Sat Dec 5 23:16:58 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.31 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.32 2009/12/05 23:16:58 tsutsui Exp $ */ /* * This file was taken from mvme68k/mvme68k/pmap_bootstrap.c @@ -45,7 +45,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.31 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.32 2009/12/05 23:16:58 tsutsui Exp $"); #include <sys/param.h> #include <sys/kcore.h> @@ -104,6 +104,9 @@ paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa; u_int nptpages, kstsize; st_entry_t protoste, *ste; +#if defined(M68040) || defined(M68060) + st_entry_t *este; +#endif pt_entry_t protopte, *pte, *epte; psize_t size; int i; @@ -195,10 +198,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). 
*/ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). We need: @@ -208,53 +211,57 @@ * now to save the HW the expense of doing it. */ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. */ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map the "last PT page". 
*/ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE * 2]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = lkptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_U | PG_V; while (pte < epte) { @@ -264,7 +271,8 @@ /* * Invalidate all but the last two remaining entries. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *pte++ = PG_NV; } @@ -282,8 +290,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -296,7 +304,8 @@ /* * Invalidate all but the last two remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-2]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 2]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -313,12 +322,15 @@ *pte = lkptpa | PG_RW | PG_CI | PG_V; } /* - * Invalidate all but the final entry in the last kernel PT page - * (u-area PTEs will be validated later). The final entry maps - * the last page of physical memory. + * Invalidate all but the final entry in the last kernel PT page. 
+ * The final entry maps the last page of physical memory to + * prepare a page that is PA == VA to turn on the MMU. + * + * XXX: This looks copied from hp300 where PA != VA, but + * XXX: it's suspicious if this is also required on this port. */ - pte = (u_int *)lkptpa; - epte = &pte[NPTEPG-1]; + pte = (pt_entry_t *)lkptpa; + epte = &pte[NPTEPG - 1]; while (pte < epte) *pte++ = PG_NV; #ifdef MAXADDR @@ -329,7 +341,7 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; @@ -337,7 +349,8 @@ * Validate PTEs for kernel text (RO). The first page * of kernel text remains invalid; see locore.s */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_U | PG_V; while (pte < epte) { @@ -349,7 +362,8 @@ * by us so far (kstpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). 
*/ - epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(kstpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -365,7 +379,8 @@ * these machines (for the 68040 not strictly necessary, but * recommended by Motorola; for the 68060 mandatory) */ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; if (RELOC(mmutype, int) == MMU_68040) { protopte &= ~PG_CMASK; @@ -423,13 +438,11 @@ /* * Sysseg: base of kernel segment table */ - RELOC(Sysseg, st_entry_t *) = - (st_entry_t *)(kstpa - firstpa); + RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa); /* * Sysptmap: base of kernel page table map */ - RELOC(Sysptmap, pt_entry_t *) = - (pt_entry_t *)(kptmpa - firstpa); + RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa); /* * Sysmap: kernel page table (as mapped through Sysptmap) * Allocated at the end of KVA space. 
@@ -564,8 +577,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1); Index: src/sys/arch/x68k/x68k/pmap_bootstrap.c diff -u src/sys/arch/x68k/x68k/pmap_bootstrap.c:1.45 src/sys/arch/x68k/x68k/pmap_bootstrap.c:1.46 --- src/sys/arch/x68k/x68k/pmap_bootstrap.c:1.45 Fri Dec 4 18:55:14 2009 +++ src/sys/arch/x68k/x68k/pmap_bootstrap.c Sat Dec 5 23:16:58 2009 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.45 2009/12/04 18:55:14 tsutsui Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.46 2009/12/05 23:16:58 tsutsui Exp $ */ /* * Copyright (c) 1991, 1993 @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.45 2009/12/04 18:55:14 tsutsui Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.46 2009/12/05 23:16:58 tsutsui Exp $"); #include "opt_m680x0.h" @@ -87,6 +87,9 @@ paddr_t kstpa, kptpa, kptmpa, lwp0upa; u_int nptpages, kstsize; st_entry_t protoste, *ste; +#if defined(M68040) || defined(M68060) + st_entry_t *este; +#endif pt_entry_t protopte, *pte, *epte; /* @@ -157,10 +160,10 @@ * First invalidate the entire "segment table" pages * (levels 1 and 2 have the same "invalid" value). */ - pte = (u_int *)kstpa; - epte = &pte[kstsize * NPTEPG]; - while (pte < epte) - *pte++ = SG_NV; + ste = (st_entry_t *)kstpa; + este = &ste[kstsize * NPTEPG]; + while (ste < este) + *ste++ = SG_NV; /* * Initialize level 2 descriptors (which immediately * follow the level 1 table). We need: @@ -170,47 +173,51 @@ * now to save the HW the expense of doing it. 
*/ num = nptpages * (NPTEPG / SG4_LEV3SIZE); - pte = &((u_int *)kstpa)[SG4_LEV1SIZE]; - epte = &pte[num]; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE]; + este = &ste[num]; protoste = kptpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize level 1 descriptors. We need: - * roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE + * howmany(num, SG4_LEV2SIZE) * level 1 descriptors to map the `num' level 2's. */ - pte = (u_int *)kstpa; - epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE]; - protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + ste = (st_entry_t *)kstpa; + este = &ste[howmany(num, SG4_LEV2SIZE)]; + protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV2SIZE * sizeof(st_entry_t)); } /* * Initialize the final level 1 descriptor to map the last * block of level 2 descriptors. */ - ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1]; - pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE]; - *ste = (u_int)pte | SG_U | SG_RW | SG_V; + ste = (st_entry_t *)kstpa; + ste = &ste[SG4_LEV1SIZE - 1]; + este = (st_entry_t *)kstpa; + este = &este[kstsize * NPTEPG - SG4_LEV2SIZE]; + *ste = (paddr_t)este | SG_U | SG_RW | SG_V; /* * Now initialize the final portion of that block of * descriptors to map kptmpa. 
*/ - pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE]; - epte = &pte[NPTEPG/SG4_LEV3SIZE]; + ste = (st_entry_t *)kstpa; + ste = &ste[kstsize * NPTEPG - NPTEPG / SG4_LEV3SIZE]; + este = &ste[NPTEPG / SG4_LEV3SIZE]; protoste = kptmpa | SG_U | SG_RW | SG_V; - while (pte < epte) { - *pte++ = protoste; + while (ste < este) { + *ste++ = protoste; protoste += (SG4_LEV3SIZE * sizeof(st_entry_t)); } /* * Initialize Sysptmap */ - pte = (u_int *)kptmpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protopte = kptpa | PG_RW | PG_CI | PG_V; while (pte < epte) { @@ -220,7 +227,8 @@ /* * Invalidate all but the last remaining entry. */ - epte = &((u_int *)kptmpa)[NPTEPG-1]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 1]; while (pte < epte) { *pte++ = PG_NV; } @@ -235,8 +243,8 @@ * Map the page table pages in both the HW segment table * and the software Sysptmap. */ - ste = (u_int *)kstpa; - pte = (u_int *)kptmpa; + ste = (st_entry_t *)kstpa; + pte = (pt_entry_t *)kptmpa; epte = &pte[nptpages]; protoste = kptpa | SG_RW | SG_V; protopte = kptpa | PG_RW | PG_CI | PG_V; @@ -249,7 +257,8 @@ /* * Invalidate all but the last remaining entries in both. */ - epte = &((u_int *)kptmpa)[NPTEPG-1]; + epte = (pt_entry_t *)kptmpa; + epte = &epte[NPTEPG - 1]; while (pte < epte) { *ste++ = SG_NV; *pte++ = PG_NV; @@ -265,14 +274,15 @@ * Initialize kernel page table. * Start by invalidating the `nptpages' that we have allocated. */ - pte = (u_int *)kptpa; + pte = (pt_entry_t *)kptpa; epte = &pte[nptpages * NPTEPG]; while (pte < epte) *pte++ = PG_NV; /* * Validate PTEs for kernel text (RO) */ - pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; + pte = (pt_entry_t *)kptpa; + pte = &pte[m68k_btop(KERNBASE)]; /* XXX why KERNBASE relative? */ epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; protopte = firstpa | PG_RO | PG_V; @@ -285,7 +295,8 @@ * by us so far (kstpa - firstpa bytes), and pages for lwp0 * u-area and page table allocated below (RW). 
*/ - epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(kstpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; /* * Enable copy-back caching of data pages @@ -301,7 +312,8 @@ * these machines (for the 68040 not strictly necessary, but * recommended by Motorola; for the 68060 mandatory) */ - epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; + epte = (pt_entry_t *)kptpa; + epte = &epte[m68k_btop(nextpa - firstpa)]; protopte = (protopte & ~PG_PROT) | PG_RW; if (RELOC(mmutype, int) == MMU_68040) { protopte &= ~PG_CCB; @@ -320,8 +332,8 @@ protopte = INTIOBASE | PG_RW | PG_CI | PG_V; epte = &pte[IIOMAPSIZE]; - RELOC(intiobase, u_int8_t *) = (char *)PTE2VA(pte); - RELOC(IODEVbase, u_int8_t *) = RELOC(intiobase, u_int8_t *); /* XXX */ + RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte); + RELOC(IODEVbase, uint8_t *) = RELOC(intiobase, uint8_t *); /* XXX */ RELOC(intiolimit, char *) = (char *)PTE2VA(epte); while (pte < epte) { *pte++ = protopte; @@ -335,13 +347,11 @@ /* * Sysseg: base of kernel segment table */ - RELOC(Sysseg, st_entry_t *) = - (st_entry_t *)(kstpa - firstpa); + RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa); /* * Sysptmap: base of kernel page table map */ - RELOC(Sysptmap, pt_entry_t *) = - (pt_entry_t *)(kptmpa - firstpa); + RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa); /* * Sysmap: kernel page table (as mapped through Sysptmap) * Allocated at the end of KVA space. @@ -412,8 +422,8 @@ int num; kpm->pm_stfree = ~l2tobm(0); - num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE), - SG4_LEV2SIZE) / SG4_LEV2SIZE; + num = howmany(nptpages * (NPTEPG / SG4_LEV3SIZE), + SG4_LEV2SIZE); while (num) kpm->pm_stfree &= ~l2tobm(num--); kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);