On 23/12/20(Wed) 18:24, Mark Kettenis wrote:
> Diff below switches the i386 pmap to use the modern km_alloc(9)
> functions and uses IPL_VM for the pmap pool, following the example of
> amd64.

Diff below is the one I sent you last year.  It has an "#if notyet"
around the km_alloc() conversion in pmap_enter_special_pae(); enabling
that allocation generates the following fault:

panic: uvm_fault(0xd0e39af8, 0xf1dfc000, 0, 1) -> e
Stopped at      db_enter+0x4:   popl    %ebp
    TID    PID    UID     PRFLAGS     PFLAGS  CPU  COMMAND
*     0      0      0     0x10000      0x200    0K swapper
db_enter(d0e53909,d10c5df4,0,f1dfc000,d0ecca7c) at db_enter+0x4
panic(d0c38a96,d0e39af8,f1dfc000,1,e) at panic+0xd3
kpageflttrap(d10c5e60,f1dfc000,f1dfc000,ffff,d0f78b00) at kpageflttrap+0x14d
trap(d10c5e60) at trap+0x26a
calltrap(8,10006,d1d91cc0,f1ee2000,d083107c) at calltrap+0xc
docopyf(d1d91cc0) at docopyf+0x5
pmap_create(1,1000,61c1cc4d,d1da2ea4,d0f7af34) at pmap_create+0xa8
uvmspace_fork(d0f7ab0c,d1d94ca0,d0f7ab0c,1,d10c5f70) at uvmspace_fork+0x56
process_new(d1d94ca0,d0f7ab0c,1) at process_new+0xeb
fork1(d0ecca7c,1,d08c8d40,0,0,d10c5f90) at fork1+0x1ba

> Don't have easy access to an i386 machine right now, so this has only
> been compile tested.

This can be reproduced in vmm(4) in case you'd like to debug it.

Index: arch/i386/i386/pmap.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/i386/pmap.c,v
retrieving revision 1.210
diff -u -p -r1.210 pmap.c
--- arch/i386/i386/pmap.c       28 Dec 2020 14:02:08 -0000      1.210
+++ arch/i386/i386/pmap.c       28 Dec 2020 14:17:45 -0000
@@ -1365,7 +1365,7 @@ void
 pmap_pinit_pd_86(struct pmap *pmap)
 {
        /* allocate PDP */
-       pmap->pm_pdir = uvm_km_alloc(kernel_map, NBPG);
+       pmap->pm_pdir = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_dirty, &kd_waitok);
        if (pmap->pm_pdir == 0)
                panic("pmap_pinit_pd_86: kernel_map out of virtual space!");
        pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
@@ -1397,7 +1397,8 @@ pmap_pinit_pd_86(struct pmap *pmap)
         * execution, one that lacks all kernel mappings.
         */
        if (cpu_meltdown) {
-               pmap->pm_pdir_intel = uvm_km_zalloc(kernel_map, NBPG);
+               pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
+                   &kd_waitok);
                if (pmap->pm_pdir_intel == 0)
                        panic("%s: kernel_map out of virtual space!", __func__);
 
@@ -1449,11 +1450,12 @@ pmap_destroy(struct pmap *pmap)
                uvm_pagefree(pg);
        }
 
-       uvm_km_free(kernel_map, pmap->pm_pdir, pmap->pm_pdirsize);
+       km_free((void *)pmap->pm_pdir, pmap->pm_pdirsize, &kv_any, &kp_dirty);
        pmap->pm_pdir = 0;
 
        if (pmap->pm_pdir_intel) {
-               uvm_km_free(kernel_map, pmap->pm_pdir_intel, pmap->pm_pdirsize);
+               km_free((void *)pmap->pm_pdir_intel, pmap->pm_pdirsize,
+                   &kv_any, &kp_dirty);
                pmap->pm_pdir_intel = 0;
        }
 
@@ -2522,8 +2524,9 @@ pmap_enter_special_86(vaddr_t va, paddr_
                    __func__, va);
 
        if (!pmap->pm_pdir_intel) {
-               if ((pmap->pm_pdir_intel = uvm_km_zalloc(kernel_map, NBPG))
-                   == 0)
+               pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
+                   &kd_waitok);
+               if (pmap->pm_pdir_intel == 0)
                        panic("%s: kernel_map out of virtual space!", __func__);
                if (!pmap_extract(pmap, pmap->pm_pdir_intel,
                    &pmap->pm_pdirpa_intel))
Index: arch/i386/i386/pmapae.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/i386/pmapae.c,v
retrieving revision 1.60
diff -u -p -r1.60 pmapae.c
--- arch/i386/i386/pmapae.c     23 Sep 2020 15:13:26 -0000      1.60
+++ arch/i386/i386/pmapae.c     28 Dec 2020 14:17:45 -0000
@@ -738,7 +738,7 @@ pmap_bootstrap_pae(void)
                                    (uint32_t)VM_PAGE_TO_PHYS(ptppg));
                        }
                }
-               uvm_km_free(kernel_map, (vaddr_t)pd, NBPG);
+               km_free(pd, NBPG, &kv_any, &kp_dirty);
                DPRINTF("%s: freeing PDP 0x%x\n", __func__, (uint32_t)pd);
        }
 
@@ -944,7 +944,8 @@ pmap_pinit_pd_pae(struct pmap *pmap)
        paddr_t pdidx[4];
 
        /* allocate PDP */
-       pmap->pm_pdir = uvm_km_alloc(kernel_map, 4 * NBPG);
+       pmap->pm_pdir = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_dirty,
+           &kd_waitok);
        if (pmap->pm_pdir == 0)
                panic("pmap_pinit_pd_pae: kernel_map out of virtual space!");
        /* page index is in the pmap! */
@@ -997,7 +998,8 @@ pmap_pinit_pd_pae(struct pmap *pmap)
        if (cpu_meltdown) {
                int i;
 
-               if ((va = uvm_km_zalloc(kernel_map, 4 * NBPG)) == 0)
+               va = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero, &kd_waitok);
+               if (va == 0)
                        panic("%s: kernel_map out of virtual space!", __func__);
                if (!pmap_extract(pmap_kernel(),
                    (vaddr_t)&pmap->pm_pdidx_intel, &pmap->pm_pdirpa_intel))
@@ -1936,7 +1938,12 @@ pmap_enter_special_pae(vaddr_t va, paddr
                    __func__, va);
 
        if (!pmap->pm_pdir_intel) {
-               if ((vapd = uvm_km_zalloc(kernel_map, 4 * NBPG)) == 0)
+#if notyet
+               vapd = (vaddr_t)km_alloc(4*NBPG, &kv_any, &kp_zero, &kd_waitok);
+#else
+               vapd = uvm_km_zalloc(kernel_map, 4 * NBPG);
+#endif
+               if (vapd == 0)
                        panic("%s: kernel_map out of virtual space!", __func__);
                pmap->pm_pdir_intel = vapd;
                if (!pmap_extract(pmap, (vaddr_t)&pmap->pm_pdidx_intel,

Reply via email to