Author: jeff
Date: Wed Aug 7 06:21:20 2013
New Revision: 254025
URL: http://svnweb.freebsd.org/changeset/base/254025
Log:
Replace kernel virtual address space allocation with vmem. This provides
transparent layering and reduced fragmentation.
- Normalize functions that allocate memory to use kmem_*
- Those that allocate address space are named kva_*
- Those that operate on maps are named kmap_*
- Implement recursive allocation handling for kmem_arena in vmem.
Reviewed by: alc
Tested by: pho
Sponsored by: EMC / Isilon Storage Division
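
A minimal sketch of the naming scheme described above, written against the
post-r254025 interfaces that appear in the diff below; the example_* wrapper
functions and the sizes are purely illustrative, not part of this commit:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

/* kmem_*: wired, mapped kernel memory backed by pages from an arena. */
static void *
example_backed_alloc(vm_size_t size)
{
    return ((void *)kmem_malloc(kernel_arena, size, M_WAITOK | M_ZERO));
}

static void
example_backed_free(void *p, vm_size_t size)
{
    kmem_free(kernel_arena, (vm_offset_t)p, size);
}

/*
 * kva_*: bare kernel virtual address space with no backing pages; this
 * replaces kmem_alloc_nofault()/kmem_free() on kernel_map.  kmap_*
 * functions operate on explicit submaps such as exec_map
 * (kmap_alloc_wait()/kmap_free_wakeup(), see the kern_exec.c hunk below).
 */
static vm_offset_t
example_kva_alloc(vm_size_t size)
{
    return (kva_alloc(size));
}

static void
example_kva_free(vm_offset_t va, vm_size_t size)
{
    kva_free(va, size);
}
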
Modified:
head/sys/amd64/amd64/mp_machdep.c
head/sys/amd64/amd64/pmap.c
head/sys/amd64/amd64/sys_machdep.c
head/sys/amd64/amd64/vm_machdep.c
head/sys/arm/arm/bus_space_generic.c
head/sys/arm/arm/busdma_machdep-v6.c
head/sys/arm/arm/busdma_machdep.c
head/sys/arm/arm/mp_machdep.c
head/sys/arm/arm/pmap-v6.c
head/sys/arm/arm/pmap.c
head/sys/arm/arm/vm_machdep.c
head/sys/arm/at91/at91.c
head/sys/arm/mv/armadaxp/armadaxp_mp.c
head/sys/arm/s3c2xx0/s3c2xx0_space.c
head/sys/arm/xscale/i80321/i80321_space.c
head/sys/arm/xscale/i8134x/i81342_space.c
head/sys/arm/xscale/ixp425/ixp425_pci_space.c
head/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
head/sys/cddl/compat/opensolaris/sys/kmem.h
head/sys/compat/linux/linux_misc.c
head/sys/compat/ndis/subr_ntoskrnl.c
head/sys/dev/bktr/bktr_core.c
head/sys/dev/drm/drm_scatter.c
head/sys/dev/drm2/drm_scatter.c
head/sys/dev/drm2/i915/intel_ringbuffer.c
head/sys/dev/drm2/ttm/ttm_bo_util.c
head/sys/dev/xen/blkback/blkback.c
head/sys/dev/xen/netback/netback.c
head/sys/dev/xen/xenpci/xenpci.c
head/sys/i386/i386/machdep.c
head/sys/i386/i386/mp_machdep.c
head/sys/i386/i386/pmap.c
head/sys/i386/i386/sys_machdep.c
head/sys/i386/i386/vm_machdep.c
head/sys/i386/ibcs2/imgact_coff.c
head/sys/i386/pci/pci_cfgreg.c
head/sys/i386/xen/mp_machdep.c
head/sys/i386/xen/pmap.c
head/sys/ia64/ia64/mp_machdep.c
head/sys/kern/imgact_gzip.c
head/sys/kern/init_main.c
head/sys/kern/kern_exec.c
head/sys/kern/kern_malloc.c
head/sys/kern/kern_mbuf.c
head/sys/kern/kern_sharedpage.c
head/sys/kern/subr_busdma_bufalloc.c
head/sys/kern/subr_vmem.c
head/sys/kern/vfs_bio.c
head/sys/mips/mips/mp_machdep.c
head/sys/mips/mips/pmap.c
head/sys/mips/mips/vm_machdep.c
head/sys/mips/sibyte/sb_zbpci.c
head/sys/ofed/include/linux/dma-mapping.h
head/sys/ofed/include/linux/gfp.h
head/sys/ofed/include/linux/linux_compat.c
head/sys/pc98/pc98/machdep.c
head/sys/powerpc/aim/mmu_oea.c
head/sys/powerpc/aim/mmu_oea64.c
head/sys/powerpc/aim/vm_machdep.c
head/sys/powerpc/booke/pmap.c
head/sys/powerpc/booke/vm_machdep.c
head/sys/powerpc/powerpc/busdma_machdep.c
head/sys/powerpc/powerpc/mp_machdep.c
head/sys/sparc64/sparc64/bus_machdep.c
head/sys/sparc64/sparc64/mem.c
head/sys/sparc64/sparc64/mp_machdep.c
head/sys/sparc64/sparc64/pmap.c
head/sys/sparc64/sparc64/vm_machdep.c
head/sys/vm/memguard.c
head/sys/vm/memguard.h
head/sys/vm/pmap.h
head/sys/vm/uma_core.c
head/sys/vm/vm_extern.h
head/sys/vm/vm_glue.c
head/sys/vm/vm_init.c
head/sys/vm/vm_kern.c
head/sys/vm/vm_kern.h
head/sys/vm/vm_map.c
head/sys/vm/vm_map.h
head/sys/vm/vm_object.c
head/sys/x86/x86/busdma_machdep.c
head/sys/xen/gnttab.c
Modified: head/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- head/sys/amd64/amd64/mp_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/amd64/amd64/mp_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -938,10 +938,14 @@ start_all_aps(void)
apic_id = cpu_apic_ids[cpu];
/* allocate and set up an idle stack data page */
- bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES *
PAGE_SIZE);
- doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
+ KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+ doublefault_stack = (char *)kmem_malloc(kernel_arena,
+ PAGE_SIZE, M_WAITOK | M_ZERO);
+ nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE -
8;
bootAP = cpu;
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/amd64/amd64/pmap.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -860,7 +860,8 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
}
@@ -5060,7 +5061,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
}
offset = pa & PAGE_MASK;
size = round_page(offset + size);
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
@@ -5096,7 +5097,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
Modified: head/sys/amd64/amd64/sys_machdep.c
==============================================================================
--- head/sys/amd64/amd64/sys_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/amd64/amd64/sys_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -356,7 +356,7 @@ amd64_set_ioperm(td, uap)
*/
pcb = td->td_pcb;
if (pcb->pcb_tssp == NULL) {
- tssp = (struct amd64tss *)kmem_malloc(kernel_map,
+ tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
ctob(IOPAGES+1), M_WAITOK);
if (tssp == NULL)
return (ENOMEM);
@@ -463,7 +463,7 @@ user_ldt_alloc(struct proc *p, int force
return (mdp->md_ldt);
mtx_unlock(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
max_ldt_segment * sizeof(struct user_segment_descriptor),
M_WAITOK);
if (new_ldt->ldt_base == NULL) {
@@ -484,7 +484,7 @@ user_ldt_alloc(struct proc *p, int force
mtx_lock(&dt_lock);
pldt = mdp->md_ldt;
if (pldt != NULL && !force) {
- kmem_free(kernel_map, (vm_offset_t)new_ldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)new_ldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(new_ldt, M_SUBPROC);
return (pldt);
@@ -529,7 +529,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
{
if (--pldt->ldt_refcnt == 0) {
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(pldt, M_SUBPROC);
}
Modified: head/sys/amd64/amd64/vm_machdep.c
==============================================================================
--- head/sys/amd64/amd64/vm_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/amd64/amd64/vm_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -342,7 +342,7 @@ cpu_thread_clean(struct thread *td)
* Clean TSS/iomap
*/
if (pcb->pcb_tssp != NULL) {
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_tssp,
+ kva_free((vm_offset_t)pcb->pcb_tssp,
ctob(IOPAGES + 1));
pcb->pcb_tssp = NULL;
}
Modified: head/sys/arm/arm/bus_space_generic.c
==============================================================================
--- head/sys/arm/arm/bus_space_generic.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/bus_space_generic.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -73,7 +73,7 @@ generic_bs_map(void *t, bus_addr_t bpa,
offset = bpa & PAGE_MASK;
startpa = trunc_page(bpa);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (va == 0)
return (ENOMEM);
@@ -118,7 +118,7 @@ generic_bs_unmap(void *t, bus_space_hand
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
void
Modified: head/sys/arm/arm/busdma_machdep-v6.c
==============================================================================
--- head/sys/arm/arm/busdma_machdep-v6.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/busdma_machdep-v6.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -696,10 +696,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
*vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -744,7 +744,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
dmat->map_count--;
free(map, M_DEVBUF);
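
As a hedged aside on the bus_dmamem_alloc() changes above: physically
constrained allocations now name kernel_arena rather than kernel_map. A
minimal sketch under that assumption (example_dma_buf_* and their parameters
are hypothetical, not part of this commit):

#include <sys/param.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

/* Allocate wired memory whose pages all lie below 'lowaddr'. */
static void *
example_dma_buf_alloc(vm_size_t size, vm_paddr_t lowaddr, int mflags)
{
    return ((void *)kmem_alloc_attr(kernel_arena, size, mflags,
        0, lowaddr, VM_MEMATTR_DEFAULT));
}

/* Variant that also requires physical contiguity and alignment. */
static void *
example_dma_buf_alloc_contig(vm_size_t size, vm_paddr_t lowaddr,
    u_long alignment, vm_paddr_t boundary, int mflags)
{
    return ((void *)kmem_alloc_contig(kernel_arena, size, mflags,
        0, lowaddr, alignment, boundary, VM_MEMATTR_DEFAULT));
}

static void
example_dma_buf_free(void *vaddr, vm_size_t size)
{
    kmem_free(kernel_arena, (vm_offset_t)vaddr, size);
}
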
Modified: head/sys/arm/arm/busdma_machdep.c
==============================================================================
--- head/sys/arm/arm/busdma_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/busdma_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -752,10 +752,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -798,7 +798,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
}
static void
Modified: head/sys/arm/arm/mp_machdep.c
==============================================================================
--- head/sys/arm/arm/mp_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/mp_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -112,7 +112,8 @@ cpu_mp_start(void)
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
- dpcpu[i] = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
temp_pagetable_va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE,
M_TEMP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
addr = KERNPHYSADDR;
Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/pmap-v6.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -1255,8 +1255,7 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
@@ -4103,7 +4102,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t si
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/pmap.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -4718,7 +4718,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t si
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
Modified: head/sys/arm/arm/vm_machdep.c
==============================================================================
--- head/sys/arm/arm/vm_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/arm/vm_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -210,7 +210,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
@@ -667,7 +667,8 @@ uma_small_alloc(uma_zone_t zone, int byt
if (zone == l2zone &&
pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
*flags = UMA_SLAB_KMEM;
- ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
+ ret = ((void *)kmem_malloc(kmem_arena, bytes,
+ M_NOWAIT));
return (ret);
}
pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
@@ -701,7 +702,7 @@ uma_small_free(void *mem, int size, u_in
pt_entry_t *pt;
if (flags & UMA_SLAB_KMEM)
- kmem_free(kmem_map, (vm_offset_t)mem, size);
+ kmem_free(kmem_arena, (vm_offset_t)mem, size);
else {
struct arm_small_page *sp;
Modified: head/sys/arm/at91/at91.c
==============================================================================
--- head/sys/arm/at91/at91.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/at91/at91.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -85,7 +85,7 @@ at91_bs_unmap(void *t, bus_space_handle_
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
static int
Modified: head/sys/arm/mv/armadaxp/armadaxp_mp.c
==============================================================================
--- head/sys/arm/mv/armadaxp/armadaxp_mp.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/mv/armadaxp/armadaxp_mp.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -113,7 +113,7 @@ platform_mp_start_ap(void)
cputype = cpufunc_id();
cputype &= CPU_ID_CPU_MASK;
- smp_boot = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ smp_boot = kva_alloc(PAGE_SIZE);
pmap_kenter_nocache(smp_boot, 0xffff0000);
dst = (uint32_t *) smp_boot;
@@ -121,7 +121,7 @@ platform_mp_start_ap(void)
src++, dst++) {
*dst = *src;
}
- kmem_free(kernel_map, smp_boot, PAGE_SIZE);
+ kva_free(smp_boot, PAGE_SIZE);
if (cputype == CPU_ID_MV88SV584X_V7) {
/* Core rev A0 */
Modified: head/sys/arm/s3c2xx0/s3c2xx0_space.c
==============================================================================
--- head/sys/arm/s3c2xx0/s3c2xx0_space.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/s3c2xx0/s3c2xx0_space.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -182,7 +182,7 @@ s3c2xx0_bs_map(void *t, bus_addr_t bpa,
startpa = trunc_page(bpa);
endpa = round_page(bpa + size);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (!va)
return (ENOMEM);
@@ -214,7 +214,7 @@ s3c2xx0_bs_unmap(void *t, bus_space_hand
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
Modified: head/sys/arm/xscale/i80321/i80321_space.c
==============================================================================
--- head/sys/arm/xscale/i80321/i80321_space.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/xscale/i80321/i80321_space.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -312,7 +312,7 @@ i80321_mem_bs_unmap(void *t, bus_space_h
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
Modified: head/sys/arm/xscale/i8134x/i81342_space.c
==============================================================================
--- head/sys/arm/xscale/i8134x/i81342_space.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/xscale/i8134x/i81342_space.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -324,7 +324,7 @@ i81342_mem_bs_unmap(void *t, bus_space_h
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
#endif
}
Modified: head/sys/arm/xscale/ixp425/ixp425_pci_space.c
==============================================================================
--- head/sys/arm/xscale/ixp425/ixp425_pci_space.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/arm/xscale/ixp425/ixp425_pci_space.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -432,7 +432,7 @@ ixp425_pci_mem_bs_unmap(void *t, bus_spa
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
Modified: head/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
==============================================================================
--- head/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -62,6 +62,8 @@ static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif /* KMEM_DEBUG */
+#include <sys/vmem.h>
+
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
@@ -135,7 +137,7 @@ uint64_t
kmem_used(void)
{
- return (kmem_map->size);
+ return (vmem_size(kmem_arena, VMEM_ALLOC));
}
static int
Modified: head/sys/cddl/compat/opensolaris/sys/kmem.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/kmem.h Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/cddl/compat/opensolaris/sys/kmem.h Wed Aug 7 06:21:20 2013 (r254025)
@@ -32,6 +32,7 @@
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/vmem.h>
#include <vm/uma.h>
#include <vm/vm.h>
@@ -61,8 +62,6 @@ typedef struct kmem_cache {
void *kc_private;
} kmem_cache_t;
-#define vmem_t void
-
void *zfs_kmem_alloc(size_t size, int kmflags);
void zfs_kmem_free(void *buf, size_t size);
uint64_t kmem_size(void);
Modified: head/sys/compat/linux/linux_misc.c
==============================================================================
--- head/sys/compat/linux/linux_misc.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/compat/linux/linux_misc.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -467,7 +467,7 @@ cleanup:
/* Release the temporary mapping. */
if (a_out)
- kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
return (error);
}
Modified: head/sys/compat/ndis/subr_ntoskrnl.c
==============================================================================
--- head/sys/compat/ndis/subr_ntoskrnl.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/compat/ndis/subr_ntoskrnl.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -2489,7 +2489,7 @@ MmAllocateContiguousMemorySpecifyCache(s
break;
}
- ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
+ ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT,
lowest, highest, PAGE_SIZE, boundary, memattr);
if (ret != NULL)
malloc_type_allocated(M_DEVBUF, round_page(size));
Modified: head/sys/dev/bktr/bktr_core.c
==============================================================================
--- head/sys/dev/bktr/bktr_core.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/bktr/bktr_core.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -109,6 +109,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
@@ -1801,8 +1802,10 @@ video_ioctl( bktr_ptr_t bktr, int unit,
#else
buf = get_bktr_mem(unit, temp*PAGE_SIZE);
if (buf != 0) {
- kmem_free(kernel_map, bktr->bigbuf,
- (bktr->alloc_pages * PAGE_SIZE));
+ contigfree(
+ (void *)(uintptr_t)bktr->bigbuf,
+ (bktr->alloc_pages * PAGE_SIZE),
+ M_DEVBUF);
#endif
bktr->bigbuf = buf;
Modified: head/sys/dev/drm/drm_scatter.c
==============================================================================
--- head/sys/dev/drm/drm_scatter.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/drm/drm_scatter.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -52,7 +52,7 @@ drm_sg_alloc(struct drm_device *dev, str
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
+ entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
@@ -99,7 +99,7 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
return;
if (entry->vaddr != 0)
- kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
+ kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
Modified: head/sys/dev/drm2/drm_scatter.c
==============================================================================
--- head/sys/dev/drm2/drm_scatter.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/drm2/drm_scatter.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -52,7 +52,7 @@ drm_sg_alloc(struct drm_device *dev, str
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
+ entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
@@ -99,7 +99,7 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
return;
if (entry->vaddr != 0)
- kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
+ kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
Modified: head/sys/dev/drm2/i915/intel_ringbuffer.c
==============================================================================
--- head/sys/dev/drm2/i915/intel_ringbuffer.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/drm2/i915/intel_ringbuffer.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -361,7 +361,7 @@ init_pipe_control(struct intel_ring_buff
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = (uint32_t *)kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
if (pc->cpu_page == NULL)
goto err_unpin;
pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
@@ -392,7 +392,7 @@ cleanup_pipe_control(struct intel_ring_b
obj = pc->obj;
pmap_qremove((vm_offset_t)pc->cpu_page, 1);
- kmem_free(kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
+ kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -968,7 +968,7 @@ static void cleanup_status_page(struct i
return;
pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
- kmem_free(kernel_map, (vm_offset_t)ring->status_page.page_addr,
+ kva_free((vm_offset_t)ring->status_page.page_addr,
PAGE_SIZE);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -999,8 +999,7 @@ static int init_status_page(struct intel
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = (void *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE);
+ ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
Modified: head/sys/dev/drm2/ttm/ttm_bo_util.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_bo_util.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/drm2/ttm/ttm_bo_util.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -498,8 +498,7 @@ static int ttm_bo_kmap_ttm(struct ttm_bu
ttm_io_prot(mem->placement);
map->bo_kmap_type = ttm_bo_map_vmap;
map->num_pages = num_pages;
- map->virtual = (void *)kmem_alloc_nofault(kernel_map,
- num_pages * PAGE_SIZE);
+ map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
if (map->virtual != NULL) {
for (i = 0; i < num_pages; i++) {
/* XXXKIB hack */
@@ -561,7 +560,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_ob
break;
case ttm_bo_map_vmap:
pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
- kmem_free(kernel_map, (vm_offset_t)map->virtual,
+ kva_free((vm_offset_t)map->virtual,
map->num_pages * PAGE_SIZE);
break;
case ttm_bo_map_kmap:
Modified: head/sys/dev/xen/blkback/blkback.c
==============================================================================
--- head/sys/dev/xen/blkback/blkback.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/xen/blkback/blkback.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -2775,7 +2775,7 @@ xbb_free_communication_mem(struct xbb_so
{
if (xbb->kva != 0) {
#ifndef XENHVM
- kmem_free(kernel_map, xbb->kva, xbb->kva_size);
+ kva_free(xbb->kva, xbb->kva_size);
#else
if (xbb->pseudo_phys_res != NULL) {
bus_release_resource(xbb->dev, SYS_RES_MEMORY,
@@ -3014,7 +3014,7 @@ xbb_alloc_communication_mem(struct xbb_s
device_get_nameunit(xbb->dev), xbb->kva_size,
xbb->reqlist_kva_size);
#ifndef XENHVM
- xbb->kva = kmem_alloc_nofault(kernel_map, xbb->kva_size);
+ xbb->kva = kva_alloc(xbb->kva_size);
if (xbb->kva == 0)
return (ENOMEM);
xbb->gnt_base_addr = xbb->kva;
Modified: head/sys/dev/xen/netback/netback.c
==============================================================================
--- head/sys/dev/xen/netback/netback.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/xen/netback/netback.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -621,7 +621,7 @@ xnb_free_communication_mem(struct xnb_so
{
if (xnb->kva != 0) {
#ifndef XENHVM
- kmem_free(kernel_map, xnb->kva, xnb->kva_size);
+ kva_free(xnb->kva, xnb->kva_size);
#else
if (xnb->pseudo_phys_res != NULL) {
bus_release_resource(xnb->dev, SYS_RES_MEMORY,
@@ -811,7 +811,7 @@ xnb_alloc_communication_mem(struct xnb_s
xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
}
#ifndef XENHVM
- xnb->kva = kmem_alloc_nofault(kernel_map, xnb->kva_size);
+ xnb->kva = kva_alloc(xnb->kva_size);
if (xnb->kva == 0)
return (ENOMEM);
xnb->gnt_base_addr = xnb->kva;
Modified: head/sys/dev/xen/xenpci/xenpci.c
==============================================================================
--- head/sys/dev/xen/xenpci/xenpci.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/dev/xen/xenpci/xenpci.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -383,7 +383,7 @@ xenpci_attach(device_t dev)
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
panic("HYPERVISOR_memory_op failed");
- shared_va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ shared_va = kva_alloc(PAGE_SIZE);
pmap_kenter(shared_va, shared_info_pa);
HYPERVISOR_shared_info = (void *) shared_va;
Modified: head/sys/i386/i386/machdep.c
==============================================================================
--- head/sys/i386/i386/machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/i386/machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -3178,9 +3178,9 @@ f00f_hack(void *unused)
printf("Intel Pentium detected, installing workaround for F00F bug\n");
- tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
+ tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
if (tmp == 0)
- panic("kmem_alloc returned 0");
+ panic("kmem_malloc returned 0");
/* Put the problematic entry (#6) at the end of the lower page. */
new_idt = (struct gate_descriptor*)
@@ -3189,9 +3189,7 @@ f00f_hack(void *unused)
r_idt.rd_base = (u_int)new_idt;
lidt(&r_idt);
idt = new_idt;
- if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
- VM_PROT_READ, FALSE) != KERN_SUCCESS)
- panic("vm_map_protect failed");
+ pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
Modified: head/sys/i386/i386/mp_machdep.c
==============================================================================
--- head/sys/i386/i386/mp_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/i386/mp_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -959,8 +959,10 @@ start_all_aps(void)
/* allocate and set up a boot stack data page */
bootstacks[cpu] =
- (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/i386/pmap.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -655,7 +655,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int by
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
- return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
+ return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
@@ -783,13 +783,13 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1747,8 +1747,7 @@ pmap_pinit(pmap_t pmap)
* page directory table.
*/
if (pmap->pm_pdir == NULL) {
- pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
- NBPTD);
+ pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
return (0);
@@ -5044,7 +5043,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
else
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -5079,7 +5078,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
Modified: head/sys/i386/i386/sys_machdep.c
==============================================================================
--- head/sys/i386/i386/sys_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/i386/sys_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -164,7 +164,7 @@ sysarch(td, uap)
break;
case I386_SET_LDT:
if (kargs.largs.descs != NULL) {
- lp = (union descriptor *)kmem_malloc(kernel_map,
+ lp = (union descriptor *)kmem_malloc(kernel_arena,
kargs.largs.num * sizeof(union descriptor),
M_WAITOK);
if (lp == NULL) {
@@ -175,7 +175,7 @@ sysarch(td, uap)
kargs.largs.num * sizeof(union descriptor));
if (error == 0)
error = i386_set_ldt(td, &kargs.largs, lp);
- kmem_free(kernel_map, (vm_offset_t)lp,
+ kmem_free(kernel_arena, (vm_offset_t)lp,
kargs.largs.num * sizeof(union descriptor));
} else {
error = i386_set_ldt(td, &kargs.largs, NULL);
@@ -299,7 +299,7 @@ i386_extend_pcb(struct thread *td)
0 /* granularity */
};
- ext = (struct pcb_ext *)kmem_malloc(kernel_map, ctob(IOPAGES+1),
+ ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
M_WAITOK);
if (ext == 0)
return (ENOMEM);
@@ -473,7 +473,7 @@ user_ldt_alloc(struct mdproc *mdp, int l
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
round_page(len * sizeof(union descriptor)), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -513,7 +513,7 @@ user_ldt_alloc(struct mdproc *mdp, int l
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
len * sizeof(union descriptor), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -576,7 +576,7 @@ user_ldt_deref(struct proc_ldt *pldt)
mtx_assert(&dt_lock, MA_OWNED);
if (--pldt->ldt_refcnt == 0) {
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
pldt->ldt_len * sizeof(union descriptor));
free(pldt, M_SUBPROC);
} else
@@ -855,7 +855,7 @@ i386_ldt_grow(struct thread *td, int len
* free the new object and return.
*/
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map,
+ kmem_free(kernel_arena,
(vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
@@ -889,7 +889,7 @@ i386_ldt_grow(struct thread *td, int len
mtx_unlock_spin(&dt_lock);
#endif
if (old_ldt_base != NULL_LDT_BASE) {
- kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
old_ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
}
Modified: head/sys/i386/i386/vm_machdep.c
==============================================================================
--- head/sys/i386/i386/vm_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/i386/vm_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -355,7 +355,7 @@ cpu_thread_clean(struct thread *td)
* XXX do we need to move the TSS off the allocated pages
* before freeing them? (not done here)
*/
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
+ kva_free((vm_offset_t)pcb->pcb_ext,
ctob(IOPAGES + 1));
pcb->pcb_ext = NULL;
}
@@ -751,7 +751,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
Modified: head/sys/i386/ibcs2/imgact_coff.c
==============================================================================
--- head/sys/i386/ibcs2/imgact_coff.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/ibcs2/imgact_coff.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -146,7 +146,7 @@ load_coff_section(struct vmspace *vmspac
error = copyout(data_buf, (caddr_t) map_addr, copy_len);
- kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
return error;
}
@@ -280,7 +280,7 @@ coff_load_file(struct thread *td, char *
error = 0;
dealloc_and_fail:
- kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
fail:
VOP_UNLOCK(vp, 0);
unlocked_fail:
@@ -417,7 +417,7 @@ exec_coff_imgact(imgp)
}
free(libbuf, M_TEMP);
}
- kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
+ kmap_free_wakeup(exec_map, (vm_offset_t)buf, len);
if (error)
goto fail;
}
Modified: head/sys/i386/pci/pci_cfgreg.c
==============================================================================
--- head/sys/i386/pci/pci_cfgreg.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/pci/pci_cfgreg.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -562,7 +562,7 @@ pcie_cfgregopen(uint64_t base, uint8_t m
if (pcie_array == NULL)
return (0);
- va = kmem_alloc_nofault(kernel_map, PCIE_CACHE * PAGE_SIZE);
+ va = kva_alloc(PCIE_CACHE * PAGE_SIZE);
if (va == 0) {
free(pcie_array, M_DEVBUF);
return (0);
Modified: head/sys/i386/xen/mp_machdep.c
==============================================================================
--- head/sys/i386/xen/mp_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/xen/mp_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -746,7 +746,8 @@ start_all_aps(void)
/* Get per-cpu data */
pc = &__pcpu[bootAP];
pcpu_init(pc, bootAP, sizeof(struct pcpu));
- dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
+ dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO), bootAP);
pc->pc_apic_id = cpu_apic_ids[bootAP];
pc->pc_prvspace = pc;
pc->pc_curthread = 0;
@@ -833,8 +834,8 @@ cpu_initialize_context(unsigned int cpu)
pmap_zero_page(m[i]);
}
- boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
- newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
+ boot_stack = kva_alloc(PAGE_SIZE);
+ newPTD = kva_alloc(NPGPTD * PAGE_SIZE);
ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;
#ifdef PAE
@@ -856,7 +857,7 @@ cpu_initialize_context(unsigned int cpu)
nkpt*sizeof(vm_paddr_t));
pmap_qremove(newPTD, 4);
- kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
+ kva_free(newPTD, 4 * PAGE_SIZE);
/*
* map actual idle stack to boot_stack
*/
Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/i386/xen/pmap.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -620,8 +620,7 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1460,8 +1459,7 @@ pmap_pinit(pmap_t pmap)
* page directory table.
*/
if (pmap->pm_pdir == NULL) {
- pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
- NBPTD);
+ pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
#ifdef HAMFISTED_LOCKING
@@ -1470,7 +1468,7 @@ pmap_pinit(pmap_t pmap)
return (0);
}
#ifdef PAE
- pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
+ pmap->pm_pdpt = (pd_entry_t *)kva_alloc(1);
#endif
}
@@ -4022,7 +4020,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
else
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -4057,7 +4055,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
Modified: head/sys/ia64/ia64/mp_machdep.c
==============================================================================
--- head/sys/ia64/ia64/mp_machdep.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/ia64/ia64/mp_machdep.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -304,7 +304,8 @@ cpu_mp_add(u_int acpi_id, u_int id, u_in
if (cpuid != 0) {
pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
pcpu_init(pc, cpuid, sizeof(*pc));
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
dpcpu_init(dpcpu, cpuid);
} else
pc = pcpup;
Modified: head/sys/kern/imgact_gzip.c
==============================================================================
--- head/sys/kern/imgact_gzip.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/kern/imgact_gzip.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -137,7 +137,7 @@ exec_gzip_imgact(imgp)
}
if (igz.inbuf)
- kmem_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
if (igz.error || error) {
printf("Output=%lu ", igz.output);
printf("Inflate_error=%d igz.error=%d where=%d\n",
@@ -310,7 +310,7 @@ NextByte(void *vp)
return igz->inbuf[(igz->idx++) - igz->offset];
}
if (igz->inbuf)
- kmem_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
igz->offset = igz->idx & ~PAGE_MASK;
error = vm_mmap(exec_map, /* map */
Modified: head/sys/kern/init_main.c
==============================================================================
--- head/sys/kern/init_main.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/kern/init_main.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -461,11 +461,6 @@ proc0_init(void *dummy __unused)
sleepinit();
/*
- * additional VM structures
- */
- vm_init2();
-
- /*
* Create process 0 (the swapper).
*/
LIST_INSERT_HEAD(&allproc, p, p_list);
Modified: head/sys/kern/kern_exec.c
==============================================================================
--- head/sys/kern/kern_exec.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/kern/kern_exec.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -1192,7 +1192,7 @@ int
exec_alloc_args(struct image_args *args)
{
- args->buf = (char *)kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
+ args->buf = (char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
return (args->buf != NULL ? 0 : ENOMEM);
}
@@ -1201,7 +1201,7 @@ exec_free_args(struct image_args *args)
{
if (args->buf != NULL) {
- kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
+ kmap_free_wakeup(exec_map, (vm_offset_t)args->buf,
PATH_MAX + ARG_MAX);
args->buf = NULL;
}
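
For the exec_map changes just above, the wait/wakeup pair keeps its old
semantics under the new kmap_* names. A small sketch, assuming the signatures
shown in the hunk (the example_* wrappers are hypothetical):

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

/* Sleeps until exec_map has room for the request. */
static char *
example_args_alloc(void)
{
    return ((char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX));
}

/* Frees the space and wakes threads blocked in kmap_alloc_wait(). */
static void
example_args_free(char *buf)
{
    kmap_free_wakeup(exec_map, (vm_offset_t)buf, PATH_MAX + ARG_MAX);
}
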
Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c Wed Aug 7 06:05:57 2013 (r254024)
+++ head/sys/kern/kern_malloc.c Wed Aug 7 06:21:20 2013 (r254025)
@@ -62,9 +62,11 @@ __FBSDID("$FreeBSD$");
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
@@ -113,12 +115,7 @@ MALLOC_DEFINE(M_TEMP, "temp", "misc temp
MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
-static void kmeminit(void *);
-SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);
-
static struct malloc_type *kmemstatistics;
-static vm_offset_t kmembase;
-static vm_offset_t kmemlimit;
static int kmemcount;
#define KMEM_ZSHIFT 4
@@ -203,12 +200,12 @@ SYSCTL_UINT(_vm, OID_AUTO, kmem_size_sca
static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
- sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");
+ sysctl_kmem_map_size, "LU", "Current kmem allocation size");
static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
- sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");
+ sysctl_kmem_map_free, "LU", "Free space in kmem");
/*
* The malloc_mtx protects the kmemstatistics linked list.
@@ -253,7 +250,7 @@ sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS
{
u_long size;
- size = kmem_map->size;
+ size = vmem_size(kmem_arena, VMEM_ALLOC);
return (sysctl_handle_long(oidp, &size, 0, req));
}
@@ -262,10 +259,7 @@ sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS
{
u_long size;
- vm_map_lock_read(kmem_map);
- size = kmem_map->root != NULL ? kmem_map->root->max_free :
- kmem_map->max_offset - kmem_map->min_offset;
- vm_map_unlock_read(kmem_map);
+ size = vmem_size(kmem_arena, VMEM_FREE);
return (sysctl_handle_long(oidp, &size, 0, req));
}
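
The two sysctl handlers above illustrate the accounting change: sizes now come
from vmem_size() on kmem_arena instead of reading kmem_map fields. A minimal
sketch of the same query, using the hypothetical helper example_kmem_usage():

#include <sys/param.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

static void
example_kmem_usage(u_long *allocp, u_long *freep)
{
    *allocp = vmem_size(kmem_arena, VMEM_ALLOC);  /* bytes in use */
    *freep = vmem_size(kmem_arena, VMEM_FREE);    /* bytes still free */
}
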
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***