Simple cleanups for amd64 pmap
- use __func__ in panics/printfs (fixes some out-of-sync function names)
- tell the compiler, via __predict_false()/__predict_true(), that the
code paths where we print diagnostics are unlikely
- use pmap_valid_entry() instead of open-coded PG_V checks in some places
- remove KERNSPACE, which is not used anywhere
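
For illustration, the shape of the change (a minimal sketch, not code
from the tree; pmap_foo and pte are placeholder names):

	/* before: open-coded PG_V test and a hardcoded function name */
	if ((pte & PG_V) == 0)
		panic("pmap_foo: invalid PTE");

	/*
	 * after: pmap_valid_entry() wraps the PG_V check, __func__
	 * stays correct if the function is renamed, and
	 * __predict_false() tells the compiler the diagnostic path
	 * is unlikely.
	 */
	if (__predict_false(!pmap_valid_entry(pte)))
		panic("%s: invalid PTE", __func__);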
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index cd395e1..05cbd3e 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -436,7 +436,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
#ifdef LARGEPAGES
/* XXX For now... */
if (opte & PG_PS)
- panic("pmap_kenter_pa: PG_PS");
+ panic("%s: PG_PS", __func__);
#endif
if (pmap_valid_entry(opte)) {
if (pa & PMAP_NOCACHE && (opte & PG_N) == 0)
@@ -899,7 +899,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t **pdes)
pptp = pmap_find_ptp(pmap, va, ppa, i);
#ifdef DIAGNOSTIC
if (pptp == NULL)
- panic("pde page disappeared");
+ panic("%s: pde page disappeared", __func__);
#endif
pptp->wire_count++;
}
@@ -915,7 +915,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t **pdes)
if (ptp == NULL) {
printf("va %lx ppa %lx\n", (unsigned long)va,
(unsigned long)ppa);
- panic("pmap_get_ptp: unmanaged user PTP");
+ panic("%s: unmanaged user PTP", __func__);
}
#endif
}
@@ -1031,8 +1031,8 @@ pmap_destroy(struct pmap *pmap)
*/
#ifdef DIAGNOSTIC
- if (pmap->pm_cpus != 0)
- printf("pmap_destroy: pmap %p cpus=0x%llx\n",
+ if (__predict_false(pmap->pm_cpus != 0))
+ printf("%s: pmap %p cpus=0x%llx\n", __func__,
(void *)pmap, pmap->pm_cpus);
#endif
@@ -1132,7 +1132,7 @@ pmap_pdes_valid(vaddr_t va, pd_entry_t **pdes, pd_entry_t *lastpde)
for (i = PTP_LEVELS; i > 1; i--) {
index = pl_i(va, i);
pde = pdes[i - 2][index];
- if ((pde & PG_V) == 0)
+ if (!pmap_valid_entry(pde))
return FALSE;
}
if (lastpde != NULL)
@@ -1171,7 +1171,7 @@ pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
pte = ptes[pl1_i(va)];
pmap_unmap_ptes(pmap);
- if (__predict_true((pte & PG_V) != 0)) {
+ if (__predict_true(pmap_valid_entry(pte))) {
if (pap != NULL)
*pap = (pte & PG_FRAME) | (va & 0xfff);
return (TRUE);
@@ -1321,16 +1321,16 @@ pmap_remove_ptes(struct pmap *pmap, struct vm_page *ptp, vaddr_t ptpva,
if ((opte & PG_PVLIST) == 0) {
#ifdef DIAGNOSTIC
if (pg != NULL)
- panic("pmap_remove_ptes: managed page without "
- "PG_PVLIST for 0x%lx", startva);
+ panic("%s: managed page without PG_PVLIST "
+ "for 0x%lx", __func__, startva);
#endif
continue;
}
#ifdef DIAGNOSTIC
if (pg == NULL)
- panic("pmap_remove_ptes: unmanaged page marked "
- "PG_PVLIST, va = 0x%lx, pa = 0x%lx",
+ panic("%s: unmanaged page marked PG_PVLIST, "
+ "va = 0x%lx, pa = 0x%lx", __func__,
startva, (u_long)(opte & PG_FRAME));
#endif
@@ -1388,17 +1388,16 @@ pmap_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
if ((opte & PG_PVLIST) == 0) {
#ifdef DIAGNOSTIC
if (pg != NULL)
- panic("pmap_remove_pte: managed page without "
- "PG_PVLIST for 0x%lx", va);
+ panic("%s: managed page without PG_PVLIST for 0x%lx",
+ __func__, va);
#endif
return(TRUE);
}
#ifdef DIAGNOSTIC
if (pg == NULL)
- panic("pmap_remove_pte: unmanaged page marked "
- "PG_PVLIST, va = 0x%lx, pa = 0x%lx", va,
- (u_long)(opte & PG_FRAME));
+ panic("%s: unmanaged page marked PG_PVLIST, va = 0x%lx, "
+ "pa = 0x%lx", __func__, va, (u_long)(opte & PG_FRAME));
#endif
/* sync R/M bits */
@@ -1464,8 +1463,8 @@ pmap_do_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
ptp = pmap_find_ptp(pmap, sva, ptppa, 1);
#ifdef DIAGNOSTIC
if (ptp == NULL)
- panic("pmap_remove: unmanaged "
- "PTP detected");
+ panic("%s: unmanaged PTP detected",
+ __func__);
#endif
}
@@ -1537,8 +1536,7 @@ pmap_do_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
ptp = pmap_find_ptp(pmap, va, ptppa, 1);
#ifdef DIAGNOSTIC
if (ptp == NULL)
- panic("pmap_remove: unmanaged PTP "
- "detected");
+ panic("%s: unmanaged PTP detected", __func__);
#endif
}
pmap_remove_ptes(pmap, ptp,
@@ -1596,14 +1594,14 @@ pmap_page_remove(struct vm_page *pg)
#ifdef DIAGNOSTIC
if (pve->pv_ptp && pmap_pdes_valid(pve->pv_va, pdes, &pde) &&
(pde & PG_FRAME) != VM_PAGE_TO_PHYS(pve->pv_ptp)) {
- printf("pmap_page_remove: pg=%p: va=%lx, pv_ptp=%p\n",
+ printf("%s: pg=%p: va=%lx, pv_ptp=%p\n", __func__,
pg, pve->pv_va, pve->pv_ptp);
- printf("pmap_page_remove: PTP's phys addr: "
- "actual=%lx, recorded=%lx\n",
+ printf("%s: PTP's phys addr: "
+ "actual=%lx, recorded=%lx\n", __func__,
(unsigned long)(pde & PG_FRAME),
VM_PAGE_TO_PHYS(pve->pv_ptp));
- panic("pmap_page_remove: mapped managed page has "
- "invalid pv_ptp field");
+ panic("%s: mapped managed page has "
+ "invalid pv_ptp field", __func__);
}
#endif
@@ -1709,8 +1707,7 @@ pmap_clear_attrs(struct vm_page *pg, unsigned long clearbits)
pmap_map_ptes(pve->pv_pmap, &ptes, &pdes);
#ifdef DIAGNOSTIC
if (!pmap_pdes_valid(pve->pv_va, pdes, NULL))
- panic("pmap_change_attrs: mapping without PTP "
- "detected");
+ panic("%s: mapping without PTP detected", __func__);
#endif
opte = ptes[pl1_i(pve->pv_va)];
@@ -1801,14 +1798,14 @@ pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
#ifdef DIAGNOSTIC
if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
- panic("pmap_write_protect: PTE space");
+ panic("%s: PTE space", __func__);
#endif
spte = &ptes[pl1_i(va)];
epte = &ptes[pl1_i(blockend)];
for (/*null */; spte < epte ; spte++) {
- if (!(*spte & PG_V))
+ if (!pmap_valid_entry(*spte))
continue;
pmap_pte_clearbits(spte, PG_RW);
pmap_pte_setbits(spte, nx);
@@ -1847,23 +1844,23 @@ pmap_unwire(struct pmap *pmap, vaddr_t va)
#ifdef DIAGNOSTIC
if (!pmap_valid_entry(ptes[pl1_i(va)]))
- panic("pmap_unwire: invalid (unmapped) va 0x%lx", va);
+ panic("%s: invalid (unmapped) va 0x%lx", __func__, va);
#endif
- if ((ptes[pl1_i(va)] & PG_W) != 0) {
+ if (__predict_true((ptes[pl1_i(va)] & PG_W) != 0)) {
pmap_pte_clearbits(&ptes[pl1_i(va)], PG_W);
pmap->pm_stats.wired_count--;
}
#ifdef DIAGNOSTIC
else {
- printf("pmap_unwire: wiring for pmap %p va 0x%lx "
- "didn't change!\n", pmap, va);
+ printf("%s: wiring for pmap %p va 0x%lx "
+ "didn't change!\n", __func__, pmap, va);
}
#endif
pmap_unmap_ptes(pmap);
}
#ifdef DIAGNOSTIC
else {
- panic("pmap_unwire: invalid PDE");
+ panic("%s: invalid PDE", __func__);
}
#endif
}
@@ -1923,12 +1920,12 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
#ifdef DIAGNOSTIC
if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE)
- panic("pmap_enter: trying to map over PDP/APDP!");
+ panic("%s: trying to map over PDP/APDP!", __func__);
/* sanity check: kernel PTPs should already have been pre-allocated */
if (va >= VM_MIN_KERNEL_ADDRESS &&
!pmap_valid_entry(pmap->pm_pdir[pl_i(va, PTP_LEVELS)]))
- panic("pmap_enter: missing kernel PTP for va %lx!", va);
+ panic("%s: missing kernel PTP for va %lx!", __func__, va);
#endif
@@ -1949,7 +1946,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
error = ENOMEM;
goto out;
}
- panic("pmap_enter: get ptp failed");
+ panic("%s: get ptp failed", __func__);
}
}
opte = ptes[pl1_i(va)]; /* old PTE */
@@ -1986,18 +1983,18 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pg = PHYS_TO_VM_PAGE(pa);
#ifdef DIAGNOSTIC
if (pg == NULL)
- panic("pmap_enter: same pa PG_PVLIST "
+ panic("%s: same pa PG_PVLIST "
"mapping with unmanaged page "
- "pa = 0x%lx (0x%lx)", pa,
- atop(pa));
+ "pa = 0x%lx (0x%lx)", __func__,
+ pa, atop(pa));
#endif
pmap_sync_flags_pte(pg, opte);
} else {
#ifdef DIAGNOSTIC
if (PHYS_TO_VM_PAGE(pa) != NULL)
- panic("pmap_enter: same pa, managed "
+ panic("%s: same pa, managed "
"page, no PG_VLIST pa: 0x%lx\n",
- pa);
+ __func__, pa);
#endif
}
goto enter_now;
@@ -2016,9 +2013,9 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
#ifdef DIAGNOSTIC
if (pg == NULL)
- panic("pmap_enter: PG_PVLIST mapping with "
- "unmanaged page "
- "pa = 0x%lx (0x%lx)", pa, atop(pa));
+ panic("%s: PG_PVLIST mapping with unmanaged "
+ "page pa = 0x%lx (0x%lx)",
+ __func__, pa, atop(pa));
#endif
pmap_sync_flags_pte(pg, opte);
pve = pmap_remove_pv(pg, pmap, va);
@@ -2055,7 +2052,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
error = ENOMEM;
goto out;
}
- panic("pmap_enter: no pv entries available");
+ panic("%s: no pv entries available", __func__);
}
}
pmap_enter_pv(pg, pve, pmap, va, ptp);
@@ -2076,7 +2073,7 @@ enter_now:
ptp->wire_count += ptpdelta;
if (pg != PHYS_TO_VM_PAGE(pa))
- panic("wtf?");
+ panic("%s: wtf?", __func__);
npte = pa | protection_codes[prot] | PG_V;
if (pg != NULL) {
@@ -2109,7 +2106,7 @@ enter_now:
* If we changed anything other than modified/used bits,
* flush the TLB. (is this overkill?)
*/
- if (opte & PG_V) {
+ if (pmap_valid_entry(opte)) {
if (nocache && (opte & PG_N) == 0)
wbinvd();
pmap_tlb_shootpage(pmap, va);
@@ -2147,7 +2144,7 @@ pmap_get_physpage(vaddr_t va, int level, paddr_t *paddrp)
ptp_va2o(va, level), NULL,
UVM_PGA_USERESERVE|UVM_PGA_ZERO);
if (ptp == NULL)
- panic("pmap_get_physpage: out of memory");
+ panic("%s: out of memory", __func__);
atomic_clearbits_int(&ptp->pg_flags, PG_BUSY);
ptp->wire_count = 1;
*paddrp = VM_PAGE_TO_PHYS(ptp);
@@ -2231,7 +2228,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
* XXX only need to check toplevel.
*/
if (target_nptp > nkptpmax[i])
- panic("out of KVA space");
+ panic("%s: out of KVA space", __func__);
needed_kptp[i] = target_nptp - nkptp[i] + 1;
}
@@ -2287,7 +2284,7 @@ pmap_steal_memory(vsize_t size, vaddr_t *start, vaddr_t *end)
break;
}
if (segno == vm_nphysseg) {
- panic("pmap_steal_memory: out of memory");
+ panic("%s: out of memory", __func__);
} else {
if (seg->avail_start == seg->start) {
pa = ptoa(seg->avail_start);
@@ -2305,7 +2302,7 @@ pmap_steal_memory(vsize_t size, vaddr_t *start, vaddr_t *end)
*/
if (seg->start == seg->end) {
if (vm_nphysseg-- == 1)
- panic("pmap_steal_memory: out of memory");
+ panic("%s: out of memory", __func__);
while (segno < vm_nphysseg) {
seg[0] = seg[1]; /* struct copy */
seg++;
@@ -2444,7 +2441,7 @@ pmap_tlb_shootpage(struct pmap *pm, vaddr_t va)
if ((mask & (1ULL << ci->ci_cpuid)) == 0)
continue;
if (x86_fast_ipi(ci, LAPIC_IPI_INVLPG) != 0)
- panic("pmap_tlb_shootpage: ipi failed");
+ panic("%s: ipi failed", __func__);
}
splx(s);
}
@@ -2483,7 +2480,7 @@ pmap_tlb_shootrange(struct pmap *pm, vaddr_t sva, vaddr_t eva)
if ((mask & (1ULL << ci->ci_cpuid)) == 0)
continue;
if (x86_fast_ipi(ci, LAPIC_IPI_INVLRANGE) != 0)
- panic("pmap_tlb_shootrange: ipi failed");
+ panic("%s: ipi failed", __func__);
}
splx(s);
}
@@ -2520,7 +2517,7 @@ pmap_tlb_shoottlb(void)
if ((mask & (1ULL << ci->ci_cpuid)) == 0)
continue;
if (x86_fast_ipi(ci, LAPIC_IPI_INVLTLB) != 0)
- panic("pmap_tlb_shoottlb: ipi failed");
+ panic("%s: ipi failed", __func__);
}
splx(s);
}
diff --git a/sys/arch/amd64/include/pmap.h b/sys/arch/amd64/include/pmap.h
index b3ba005..3796fd3 100644
--- a/sys/arch/amd64/include/pmap.h
+++ b/sys/arch/amd64/include/pmap.h
@@ -218,8 +218,6 @@
#define NTOPLEVEL_PDES (PAGE_SIZE / (sizeof (pd_entry_t)))
-#define KERNSPACE (NKL4_ENTRIES * NBPD_L4)
-
#define NPDPG (PAGE_SIZE / sizeof (pd_entry_t))
/*