The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=4dd828c80828637452a8a4e07a64e294c82e5d8b

commit 4dd828c80828637452a8a4e07a64e294c82e5d8b
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2025-06-15 21:39:10 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2025-07-14 22:03:28 +0000

    amd64: add kva_layout
    
    Reviewed by:    markj
    Tested by:      pho
    Sponsored by:   The FreeBSD Foundation
    Differential revision:  https://reviews.freebsd.org/D51085
---
 sys/amd64/amd64/mem.c              |  4 +-
 sys/amd64/amd64/minidump_machdep.c | 10 ++---
 sys/amd64/amd64/pmap.c             | 77 ++++++++++++++++++++++----------------
 sys/amd64/amd64/trap.c             |  2 +-
 sys/amd64/include/param.h          |  5 ++-
 sys/amd64/include/pmap.h           | 12 ++++++
 sys/amd64/include/vmparam.h        | 26 +++++--------
 sys/vm/vm_kern.c                   |  9 ++++-
 8 files changed, 86 insertions(+), 59 deletions(-)

diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index 413b7c74890e..851f2df0e6e1 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -105,8 +105,8 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
                         * PAGE_SIZE, the uiomove() call does not
                         * access past the end of the direct map.
                         */
-                       if (v >= DMAP_MIN_ADDRESS &&
-                           v < DMAP_MIN_ADDRESS + dmaplimit) {
+                       if (v >= kva_layout.dmap_low &&
+                           v < kva_layout.dmap_high) {
                                error = uiomove((void *)v, c, uio);
                                break;
                        }
diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c
index 6d0917e16099..43bf81a991bf 100644
--- a/sys/amd64/amd64/minidump_machdep.c
+++ b/sys/amd64/amd64/minidump_machdep.c
@@ -186,7 +186,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
         * tables, so care must be taken to read each entry only once.
         */
        pmapsize = 0;
-       for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) {
+       for (va = kva_layout.km_low; va < kva_end; ) {
                /*
                 * We always write a page, even if it is zero. Each
                 * page written corresponds to 1GB of space
@@ -279,9 +279,9 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
        mdhdr.msgbufsize = mbp->msg_size;
        mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
        mdhdr.pmapsize = pmapsize;
-       mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
-       mdhdr.dmapbase = DMAP_MIN_ADDRESS;
-       mdhdr.dmapend = DMAP_MAX_ADDRESS;
+       mdhdr.kernbase = kva_layout.km_low;
+       mdhdr.dmapbase = kva_layout.dmap_low;
+       mdhdr.dmapend = kva_layout.dmap_high;
        mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
 
        dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
@@ -323,7 +323,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
 
        /* Dump kernel page directory pages */
        bzero(fakepd, sizeof(fakepd));
-       for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) {
+       for (va = kva_layout.km_low; va < kva_end; va += NBPDP) {
                ii = pmap_pml4e_index(va);
                pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
                pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 15a812fefbaa..c5f3176f8c37 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -415,7 +415,7 @@ SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
 
 static int ndmpdp;
 vm_paddr_t dmaplimit;
-vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
+vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS_LA48;
 pt_entry_t pg_nx;
 
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
@@ -480,6 +480,18 @@ static int         ndmpdpphys;     /* number of DMPDPphys pages */
 vm_paddr_t             kernphys;       /* phys addr of start of bootstrap data */
 vm_paddr_t             KERNend;        /* and the end */
 
+struct kva_layout_s    kva_layout = {
+       .kva_min =      KV4ADDR(PML4PML4I, 0, 0, 0),
+       .dmap_low =     KV4ADDR(DMPML4I, 0, 0, 0),
+       .dmap_high =    KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0),
+       .lm_low =       KV4ADDR(LMSPML4I, 0, 0, 0),
+       .lm_high =      KV4ADDR(LMEPML4I + 1, 0, 0, 0),
+       .km_low =       KV4ADDR(KPML4BASE, 0, 0, 0),
+       .km_high =      KV4ADDR(KPML4BASE + NKPML4E - 1, NPDPEPG - 1,
+                           NPDEPG - 1, NPTEPG - 1),
+       .rec_pt =       KV4ADDR(PML4PML4I, 0, 0, 0),
+};
+
 /*
  * pmap_mapdev support pre initialization (i.e. console)
  */
@@ -549,8 +561,8 @@ static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
 
 static vmem_t *large_vmem;
 static u_int lm_ents;
-#define        PMAP_ADDRESS_IN_LARGEMAP(va)    ((va) >= LARGEMAP_MIN_ADDRESS && \
-       (va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
+#define        PMAP_ADDRESS_IN_LARGEMAP(va)    ((va) >= kva_layout.lm_low && \
+       (va) < kva_layout.lm_high)
 
 int pmap_pcid_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -2025,7 +2037,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
         */
        virtual_avail = (vm_offset_t)KERNSTART + round_2mpage(KERNend -
            (vm_paddr_t)kernphys);
-       virtual_end = VM_MAX_KERNEL_ADDRESS;
+       virtual_end = kva_layout.km_high;
 
        /*
         * Enable PG_G global pages, then switch to the kernel page
@@ -2421,6 +2433,7 @@ pmap_init(void)
 {
        struct pmap_preinit_mapping *ppim;
        vm_page_t m, mpte;
+       pml4_entry_t *pml4e;
        int error, i, ret, skz63;
 
        /* L1TF, reserve page @0 unconditionally */
@@ -2560,18 +2573,19 @@ pmap_init(void)
                printf("pmap: large map %u PML4 slots (%lu GB)\n",
                    lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
        if (lm_ents != 0) {
-               large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
-                   (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
+               large_vmem = vmem_create("large", kva_layout.lm_low,
+                   (vmem_size_t)kva_layout.lm_high - kva_layout.lm_low,
+                   PAGE_SIZE, 0, M_WAITOK);
                if (large_vmem == NULL) {
                        printf("pmap: cannot create large map\n");
                        lm_ents = 0;
                }
                for (i = 0; i < lm_ents; i++) {
                        m = pmap_large_map_getptp_unlocked();
-                       /* XXXKIB la57 */
-                       kernel_pml4[LMSPML4I + i] = X86_PG_V |
-                           X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
-                           VM_PAGE_TO_PHYS(m);
+                       pml4e = pmap_pml4e(kernel_pmap, kva_layout.lm_low +
+                           (u_long)i * NBPML4);
+                       *pml4e = X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M |
+                           pg_nx | VM_PAGE_TO_PHYS(m);
                }
        }
 }
@@ -3900,7 +3914,7 @@ pmap_kextract(vm_offset_t va)
        pd_entry_t pde;
        vm_paddr_t pa;
 
-       if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
+       if (va >= kva_layout.dmap_low && va < kva_layout.dmap_high) {
                pa = DMAP_TO_PHYS(va);
        } else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
                pa = pmap_large_map_kextract(va);
@@ -4041,7 +4055,7 @@ pmap_qremove(vm_offset_t sva, int count)
                 * enough to one of those pmap_enter() calls for it to
                 * be caught up in a promotion.
                 */
-               KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
+               KASSERT(va >= kva_layout.km_low, ("usermode va %lx", va));
                KASSERT((*vtopde(va) & X86_PG_PS) == 0,
                    ("pmap_qremove on promoted va %#lx", va));
 
@@ -4943,7 +4957,7 @@ pmap_release(pmap_t pmap)
 static int
 kvm_size(SYSCTL_HANDLER_ARGS)
 {
-       unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+       unsigned long ksize = kva_layout.km_high - kva_layout.km_low;
 
        return sysctl_handle_long(oidp, &ksize, 0, req);
 }
@@ -4954,7 +4968,7 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
 static int
 kvm_free(SYSCTL_HANDLER_ARGS)
 {
-       unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+       unsigned long kfree = kva_layout.km_high - kernel_vm_end;
 
        return sysctl_handle_long(oidp, &kfree, 0, req);
 }
@@ -5032,7 +5046,7 @@ pmap_page_array_startup(long pages)
 
        vm_page_array_size = pages;
 
-       start = VM_MIN_KERNEL_ADDRESS;
+       start = kva_layout.km_low;
        end = start + pages * sizeof(struct vm_page);
        for (va = start; va < end; va += NBPDR) {
                pfn = first_page + (va - start) / sizeof(struct vm_page);
@@ -6068,8 +6082,8 @@ pmap_demote_pde_mpte(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
                         * so the direct map region is the only part of the
                         * kernel address space that must be handled here.
                         */
-                       KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
-                           va < DMAP_MAX_ADDRESS),
+                       KASSERT(!in_kernel || (va >= kva_layout.dmap_low &&
+                           va < kva_layout.dmap_high),
                            ("pmap_demote_pde: No saved mpte for va %#lx", va));
 
                        /*
@@ -7185,7 +7199,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        PG_RW = pmap_rw_bit(pmap);
 
        va = trunc_page(va);
-       KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+       KASSERT(va <= kva_layout.km_high, ("pmap_enter: toobig"));
        KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
            va));
@@ -9551,7 +9565,7 @@ pmap_unmapdev(void *p, vm_size_t size)
        va = (vm_offset_t)p;
 
        /* If we gave a direct map region in pmap_mapdev, do nothing */
-       if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+       if (va >= kva_layout.dmap_low && va < kva_layout.dmap_high)
                return;
        offset = va & PAGE_MASK;
        size = round_page(offset + size);
@@ -9730,7 +9744,7 @@ pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
        int error;
 
        /* Only supported within the kernel map. */
-       if (va < VM_MIN_KERNEL_ADDRESS)
+       if (va < kva_layout.km_low)
                return (EINVAL);
 
        PMAP_LOCK(kernel_pmap);
@@ -9761,7 +9775,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
         * Only supported on kernel virtual addresses, including the direct
         * map but excluding the recursive map.
         */
-       if (base < DMAP_MIN_ADDRESS)
+       if (base < kva_layout.dmap_low)
                return (EINVAL);
 
        /*
@@ -9784,7 +9798,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
                        pte_bits |= X86_PG_RW;
                }
                if ((prot & VM_PROT_EXECUTE) == 0 ||
-                   va < VM_MIN_KERNEL_ADDRESS) {
+                   va < kva_layout.km_low) {
                        pde_bits |= pg_nx;
                        pte_bits |= pg_nx;
                }
@@ -9880,7 +9894,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
                                pmap_pte_props(pdpe, pde_bits, pde_mask);
                                changed = true;
                        }
-                       if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+                       if (tmpva >= kva_layout.km_low &&
                            (*pdpe & PG_PS_FRAME) < dmaplimit) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
@@ -9910,7 +9924,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
                                pmap_pte_props(pde, pde_bits, pde_mask);
                                changed = true;
                        }
-                       if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+                       if (tmpva >= kva_layout.km_low &&
                            (*pde & PG_PS_FRAME) < dmaplimit) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
@@ -9938,7 +9952,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
                                pmap_pte_props(pte, pte_bits, pte_mask);
                                changed = true;
                        }
-                       if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+                       if (tmpva >= kva_layout.km_low &&
                            (*pte & PG_FRAME) < dmaplimit) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
@@ -10910,8 +10924,8 @@ pmap_large_unmap(void *svaa, vm_size_t len)
        struct spglist spgf;
 
        sva = (vm_offset_t)svaa;
-       if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
-           sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
+       if (len == 0 || sva + len < sva || (sva >= kva_layout.dmap_low &&
+           sva + len < kva_layout.dmap_high))
                return;
 
        SLIST_INIT(&spgf);
@@ -11157,11 +11171,10 @@ pmap_large_map_wb(void *svap, vm_size_t len)
        sva = (vm_offset_t)svap;
        eva = sva + len;
        pmap_large_map_wb_fence();
-       if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
+       if (sva >= kva_layout.dmap_low && eva < kva_layout.dmap_high) {
                pmap_large_map_flush_range(sva, len);
        } else {
-               KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
-                   eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
+               KASSERT(sva >= kva_layout.lm_low && eva < kva_layout.lm_high,
                    ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
                pmap_large_map_wb_large(sva, eva);
        }
@@ -11202,8 +11215,8 @@ pmap_pti_init(void)
        VM_OBJECT_WLOCK(pti_obj);
        pml4_pg = pmap_pti_alloc_page();
        pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
-       for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
-           va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
+       for (va = kva_layout.km_low; va <= kva_layout.km_high &&
+           va >= kva_layout.km_low && va > NBPML4; va += NBPML4) {
                pdpe = pmap_pti_pdpe(va);
                pmap_pti_wire_pte(pdpe);
        }
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 09ac0a67dbef..eefddad2f142 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -769,7 +769,7 @@ trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode)
                        return (-1);
                }
        }
-       if (eva >= VM_MIN_KERNEL_ADDRESS) {
+       if (eva >= kva_layout.km_low) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index 8db314fa034d..1bbb302259d6 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -146,8 +146,9 @@
 #define        amd64_btop(x)   ((unsigned long)(x) >> PAGE_SHIFT)
 #define        amd64_ptob(x)   ((unsigned long)(x) << PAGE_SHIFT)
 
-#define        INKERNEL(va) (((va) >= DMAP_MIN_ADDRESS && (va) < DMAP_MAX_ADDRESS) \
-    || ((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS))
+#define        INKERNEL(va)    \
+    (((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high) || \
+    ((va) >= kva_layout.km_low && (va) < kva_layout.km_high))
 
 #ifdef SMP
 #define SC_TABLESIZE    1024                     /* Must be power of 2. */
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 7d3e91bcd9b9..5790b0ad156b 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -548,6 +548,18 @@ pmap_pml5e_index(vm_offset_t va)
        return ((va >> PML5SHIFT) & ((1ul << NPML5EPGSHIFT) - 1));
 }
 
+struct kva_layout_s {
+       vm_offset_t kva_min;
+       vm_offset_t dmap_low;   /* DMAP_MIN_ADDRESS */
+       vm_offset_t dmap_high;  /* DMAP_MAX_ADDRESS */
+       vm_offset_t lm_low;     /* LARGEMAP_MIN_ADDRESS */
+       vm_offset_t lm_high;    /* LARGEMAP_MAX_ADDRESS */
+       vm_offset_t km_low;     /* VM_MIN_KERNEL_ADDRESS */
+       vm_offset_t km_high;    /* VM_MAX_KERNEL_ADDRESS */
+       vm_offset_t rec_pt;
+};
+extern struct kva_layout_s kva_layout;
+
 #endif /* !LOCORE */
 
 #endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index 0cd9bb4fa7a4..f12e5217a44b 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -181,12 +181,9 @@
  * 0xffffffff80000000                        KERNBASE
  */
 
-#define        VM_MIN_KERNEL_ADDRESS   KV4ADDR(KPML4BASE, 0, 0, 0)
-#define        VM_MAX_KERNEL_ADDRESS   KV4ADDR(KPML4BASE + NKPML4E - 1, \
-                                       NPDPEPG-1, NPDEPG-1, NPTEPG-1)
-
-#define        DMAP_MIN_ADDRESS        KV4ADDR(DMPML4I, 0, 0, 0)
-#define        DMAP_MAX_ADDRESS        KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0)
+#define        VM_MIN_KERNEL_ADDRESS_LA48      KV4ADDR(KPML4BASE, 0, 0, 0)
+#define        VM_MIN_KERNEL_ADDRESS           kva_layout.km_low
+#define        VM_MAX_KERNEL_ADDRESS           kva_layout.km_high
 
 #define        KASAN_MIN_ADDRESS       KV4ADDR(KASANPML4I, 0, 0, 0)
 #define        KASAN_MAX_ADDRESS       KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
@@ -199,9 +196,6 @@
 #define        KMSAN_ORIG_MAX_ADDRESS  KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
                                        0, 0, 0)
 
-#define        LARGEMAP_MIN_ADDRESS    KV4ADDR(LMSPML4I, 0, 0, 0)
-#define        LARGEMAP_MAX_ADDRESS    KV4ADDR(LMEPML4I + 1, 0, 0, 0)
-
 /*
  * Formally kernel mapping starts at KERNBASE, but kernel linker
  * script leaves first PDE reserved.  For legacy BIOS boot, kernel is
@@ -239,21 +233,21 @@
  * vt fb startup needs to be reworked.
  */
 #define        PHYS_IN_DMAP(pa)        (dmaplimit == 0 || (pa) < dmaplimit)
-#define        VIRT_IN_DMAP(va)        ((va) >= DMAP_MIN_ADDRESS &&            \
-    (va) < (DMAP_MIN_ADDRESS + dmaplimit))
+#define        VIRT_IN_DMAP(va)        \
+    ((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high)
 
 #define        PMAP_HAS_DMAP   1
-#define        PHYS_TO_DMAP(x) ({                                              \
+#define        PHYS_TO_DMAP(x) __extension__ ({                                \
        KASSERT(PHYS_IN_DMAP(x),                                        \
            ("physical address %#jx not covered by the DMAP",           \
            (uintmax_t)x));                                             \
-       (x) | DMAP_MIN_ADDRESS; })
+       (x) + kva_layout.dmap_low; })
 
-#define        DMAP_TO_PHYS(x) ({                                              \
+#define        DMAP_TO_PHYS(x) __extension__ ({                                \
        KASSERT(VIRT_IN_DMAP(x),                                        \
            ("virtual address %#jx not covered by the DMAP",            \
            (uintmax_t)x));                                             \
-       (x) & ~DMAP_MIN_ADDRESS; })
+       (x) - kva_layout.dmap_low; })
 
 /*
  * amd64 maps the page array into KVA so that it can be more easily
@@ -274,7 +268,7 @@
  */
 #ifndef VM_KMEM_SIZE_MAX
 #define        VM_KMEM_SIZE_MAX        ((VM_MAX_KERNEL_ADDRESS - \
-    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
+    kva_layout.km_low + 1) * 3 / 5)
 #endif
 
 /* initial pagein size of beginning of executable file */
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 875c22d27628..e7d7b6726d2c 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -110,11 +110,18 @@ u_int exec_map_entry_size;
 u_int exec_map_entries;
 
 SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
-    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");
+#if defined(__amd64__)
+    &kva_layout.km_low, 0,
+#else
+    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS,
+#endif
+    "Min kernel address");
 
 SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
 #if defined(__arm__)
     &vm_max_kernel_address, 0,
+#elif defined(__amd64__)
+    &kva_layout.km_high, 0,
 #else
     SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
 #endif
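
The conversion is mechanical throughout: compile-time bounds such as
DMAP_MIN_ADDRESS, LARGEMAP_MIN_ADDRESS, and VM_MIN_KERNEL_ADDRESS become
fields of the boot-time-initialized kva_layout structure, so range checks
read a variable rather than a constant.  The removed "XXXKIB la57" comment
and the new VM_MIN_KERNEL_ADDRESS_LA48 spelling suggest this prepares for
installing an alternative layout (e.g. for 5-level paging) at boot.  One
subtlety: PHYS_TO_DMAP changes from "(x) | DMAP_MIN_ADDRESS" to
"(x) + kva_layout.dmap_low", since OR-ing in the base is only equivalent
to addition when the base is a constant whose low bits cannot overlap a
valid physical address.  A minimal userland sketch of the resulting idiom
follows; the field names mirror kva_layout_s from the patch, but the
numeric values are illustrative placeholders, not the kernel's actual
constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct kva_layout_s {
	uint64_t dmap_low;	/* was DMAP_MIN_ADDRESS */
	uint64_t dmap_high;	/* was DMAP_MAX_ADDRESS */
	uint64_t km_low;	/* was VM_MIN_KERNEL_ADDRESS */
	uint64_t km_high;	/* was VM_MAX_KERNEL_ADDRESS */
};

/*
 * Installed once at boot; a kernel running in a different paging mode
 * would install different values.  Placeholder numbers only.
 */
static struct kva_layout_s kva_layout = {
	.dmap_low  = 0xfffff80000000000ULL,
	.dmap_high = 0xfffffc0000000000ULL,
	.km_low    = 0xfffffe0000000000ULL,
	.km_high   = 0xffffffffffffffffULL,
};

/* Range checks now read the structure instead of constants. */
static bool
virt_in_dmap(uint64_t va)
{
	return (va >= kva_layout.dmap_low && va < kva_layout.dmap_high);
}

/* With a variable base, the mapping must be an addition, not an OR. */
static uint64_t
phys_to_dmap(uint64_t pa)
{
	return (pa + kva_layout.dmap_low);
}

static uint64_t
dmap_to_phys(uint64_t va)
{
	return (va - kva_layout.dmap_low);
}

int
main(void)
{
	uint64_t va = phys_to_dmap(0x1000);

	printf("va %#jx in dmap: %d, pa %#jx\n", (uintmax_t)va,
	    virt_in_dmap(va), (uintmax_t)dmap_to_phys(va));
	return (0);
}

The same replacement pattern covers the kernel map (km_low/km_high) and
the large map (lm_low/lm_high) checks elsewhere in the patch.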
