First proper use of the new km_alloc.

 - Change pool constraints to use kmem_pa_mode instead of uvm_constraint_range.
 - Use km_alloc for all backend allocations in pools.
 - Use km_alloc for the emergency kentry allocations in uvm_mapent_alloc.
 - Garbage collect uvm_km_getpage, uvm_km_getpage_pla and uvm_km_putpage.
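
For converting other callers, the old-to-new mapping looks roughly like
this (a sketch based on the hunks below; "mypool", "flags", "slowdown"
and "pp" are illustrative names, not part of the diff):

	/* Pool constraints: a kmem_pa_mode template replaces the
	 * uvm_constraint_range pointer plus nsegs. */
	pool_set_constraints(&mypool, &kp_dma);	/* was: &dma_constraint, 1 */

	/* One dirty page without sleeping; replaces uvm_km_getpage(). */
	void *v = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty, &kd_nowait);
	km_free(v, PAGE_SIZE, &kv_page, &kp_dirty);

	/* Backends that honor PR_WAITOK and a slowdown pointer build a
	 * kmem_dyn_mode by hand. */
	struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
	kd.kd_waitok = (flags & PR_WAITOK);
	kd.kd_slowdown = slowdown;
	v = km_alloc(pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange, &kd);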

Please eyeball and test this.

//art

Index: kern/dma_alloc.c
===================================================================
RCS file: /cvs/src/sys/kern/dma_alloc.c,v
retrieving revision 1.5
diff -u -r1.5 dma_alloc.c
--- kern/dma_alloc.c    2 Apr 2011 17:06:21 -0000       1.5
+++ kern/dma_alloc.c    4 Apr 2011 21:30:57 -0000
@@ -37,7 +37,7 @@
                    1 << (i + DMA_BUCKET_OFFSET));
                pool_init(&dmapools[i], 1 << (i + DMA_BUCKET_OFFSET), 0, 0, 0,
                    dmanames[i], NULL);
-               pool_set_constraints(&dmapools[i], &dma_constraint, 1);
+               pool_set_constraints(&dmapools[i], &kp_dma);
                pool_setipl(&dmapools[i], IPL_VM);
                /* XXX need pool_setlowat(&dmapools[i], dmalowat); */
        }
Index: kern/subr_pool.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_pool.c,v
retrieving revision 1.101
diff -u -r1.101 subr_pool.c
--- kern/subr_pool.c    4 Apr 2011 11:13:55 -0000       1.101
+++ kern/subr_pool.c    4 Apr 2011 21:30:58 -0000
@@ -401,8 +401,7 @@
        }
 
        /* pglistalloc/constraint parameters */
-       pp->pr_crange = &no_constraint;
-       pp->pr_pa_nsegs = 0;
+       pp->pr_crange = &kp_dirty;
 
        /* Insert this into the list of all pools. */
        TAILQ_INSERT_HEAD(&pool_head, pp, pr_poollist);
@@ -1013,18 +1012,9 @@
 }
 
 void
-pool_set_constraints(struct pool *pp, struct uvm_constraint_range *range,
-    int nsegs)
+pool_set_constraints(struct pool *pp, struct kmem_pa_mode *mode)
 {
-       /*
-        * Subsequent changes to the constrictions are only
-        * allowed to make them _more_ strict.
-        */
-       KASSERT(pp->pr_crange->ucr_high >= range->ucr_high &&
-           pp->pr_crange->ucr_low <= range->ucr_low);
-
-       pp->pr_crange = range;
-       pp->pr_pa_nsegs = nsegs;
+       pp->pr_crange = mode;
 }
 
 void
@@ -1495,32 +1485,36 @@
 void *
 pool_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
-       int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
+       struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+
+       kd.kd_waitok = (flags & PR_WAITOK);
+       kd.kd_slowdown = slowdown;
 
-       return (uvm_km_getpage_pla(kfl, slowdown, pp->pr_crange->ucr_low,
-           pp->pr_crange->ucr_high, 0, 0));
+       return (km_alloc(PAGE_SIZE, &kv_page, pp->pr_crange, &kd));
 }
 
 void
 pool_page_free(struct pool *pp, void *v)
 {
-       uvm_km_putpage(v);
+       km_free(v, PAGE_SIZE, &kv_page, pp->pr_crange);
 }
 
 void *
 pool_large_alloc(struct pool *pp, int flags, int *slowdown)
 {
-       int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
-       vaddr_t va;
+       struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+       void *v;
        int s;
 
+       kd.kd_waitok = (flags & PR_WAITOK);
+       kd.kd_slowdown = slowdown;
+
        s = splvm();
-       va = uvm_km_kmemalloc_pla(kmem_map, NULL, pp->pr_alloc->pa_pagesz, 0,
-           kfl, pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
-           0, 0, pp->pr_pa_nsegs);
+       v = km_alloc(pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange,
+           &kd);
        splx(s);
 
-       return ((void *)va);
+       return (v);
 }
 
 void
@@ -1529,23 +1523,23 @@
        int s;
 
        s = splvm();
-       uvm_km_free(kmem_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
+       km_free(v, pp->pr_alloc->pa_pagesz, &kv_intrsafe, pp->pr_crange);
        splx(s);
 }
 
 void *
 pool_large_alloc_ni(struct pool *pp, int flags, int *slowdown)
 {
-       int kfl = (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT;
+       struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
+
+       kd.kd_waitok = (flags & PR_WAITOK);
+       kd.kd_slowdown = slowdown;
 
-       return ((void *)uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object,
-           pp->pr_alloc->pa_pagesz, 0, kfl,
-           pp->pr_crange->ucr_low, pp->pr_crange->ucr_high,
-           0, 0, pp->pr_pa_nsegs));
+       return (km_alloc(pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange, &kd));
 }
 
 void
 pool_large_free_ni(struct pool *pp, void *v)
 {
-       uvm_km_free(kernel_map, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
+       km_free(v, pp->pr_alloc->pa_pagesz, &kv_any, pp->pr_crange);
 }
Index: kern/uipc_mbuf.c
===================================================================
RCS file: /cvs/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.149
diff -u -r1.149 uipc_mbuf.c
--- kern/uipc_mbuf.c    29 Jan 2011 13:15:39 -0000      1.149
+++ kern/uipc_mbuf.c    4 Apr 2011 21:30:59 -0000
@@ -136,7 +136,7 @@
        int i;
 
        pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
-       pool_set_constraints(&mbpool, &dma_constraint, 1);
+       pool_set_constraints(&mbpool, &kp_dma);
        pool_setlowat(&mbpool, mblowat);
 
        for (i = 0; i < nitems(mclsizes); i++) {
@@ -144,7 +144,7 @@
                    mclsizes[i] >> 10);
                pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
                    mclnames[i], NULL);
-               pool_set_constraints(&mclpools[i], &dma_constraint, 1); 
+               pool_set_constraints(&mclpools[i], &kp_dma); 
                pool_setlowat(&mclpools[i], mcllowat);
        }
 
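For reference, a converted pool that needs DMA-reachable memory is now
set up like this (sketch; the pool name is made up, the calls are the
ones used in the dma_alloc.c hunk above):

	pool_init(&mypool, size, 0, 0, 0, "mypl", NULL);
	pool_set_constraints(&mypool, &kp_dma);	/* DMA-reachable pages */
	pool_setipl(&mypool, IPL_VM);
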
Index: sys/pool.h
===================================================================
RCS file: /cvs/src/sys/sys/pool.h,v
retrieving revision 1.37
diff -u -r1.37 pool.h
--- sys/pool.h  3 Apr 2011 22:07:37 -0000       1.37
+++ sys/pool.h  4 Apr 2011 21:31:00 -0000
@@ -132,8 +132,7 @@
        unsigned long   pr_nidle;       /* # of idle pages */
 
        /* Physical memory configuration. */
-       struct uvm_constraint_range *pr_crange;
-       int             pr_pa_nsegs;
+       struct kmem_pa_mode *pr_crange;
 };
 
 #ifdef _KERNEL
@@ -148,8 +147,7 @@
 void           pool_sethiwat(struct pool *, int);
 int            pool_sethardlimit(struct pool *, u_int, const char *, int);
 struct uvm_constraint_range; /* XXX */
-void           pool_set_constraints(struct pool *,
-                   struct uvm_constraint_range *, int);
+void           pool_set_constraints(struct pool *, struct kmem_pa_mode *mode);
 void           pool_set_ctordtor(struct pool *, int (*)(void *, void *, int),
                    void(*)(void *, void *), void *);
 
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.92
diff -u -r1.92 uvm_extern.h
--- uvm/uvm_extern.h    4 Apr 2011 11:56:12 -0000       1.92
+++ uvm/uvm_extern.h    4 Apr 2011 21:31:00 -0000
@@ -533,14 +533,6 @@
 vaddr_t                        uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t, int);
 vaddr_t                        uvm_km_valloc_prefer_wait(vm_map_t, vsize_t,
                                        voff_t);
-void                   *uvm_km_getpage_pla(boolean_t, int *, paddr_t, paddr_t,
-                           paddr_t, paddr_t);
-/* Wrapper around old function prototype. */
-#define uvm_km_getpage(waitok, slowdown)                               \
-       uvm_km_getpage_pla(((waitok) ? 0 : UVM_KMF_NOWAIT), (slowdown), \
-           (paddr_t)0, (paddr_t)-1, 0, 0)
-
-void                   uvm_km_putpage(void *);
 
 struct vm_map          *uvm_km_suballoc(vm_map_t, vaddr_t *,
                                vaddr_t *, vsize_t, int,
Index: uvm/uvm_km.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_km.c,v
retrieving revision 1.90
diff -u -r1.90 uvm_km.c
--- uvm/uvm_km.c        4 Apr 2011 12:25:23 -0000       1.90
+++ uvm/uvm_km.c        4 Apr 2011 21:31:01 -0000
@@ -811,88 +811,7 @@
                }
        }
 }
-#endif
 
-void *
-uvm_km_getpage_pla(int flags, int *slowdown, paddr_t low, paddr_t high,
-    paddr_t alignment, paddr_t boundary)
-{
-       struct pglist pgl;
-       int pla_flags;
-       struct vm_page *pg;
-       vaddr_t va;
-
-       *slowdown = 0;
-       pla_flags = (flags & UVM_KMF_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
-       if (flags & UVM_KMF_ZERO)
-               pla_flags |= UVM_PLA_ZERO;
-       TAILQ_INIT(&pgl);
-       if (uvm_pglistalloc(PAGE_SIZE, low, high, alignment, boundary, &pgl,
-           1, pla_flags) != 0)
-               return NULL;
-       pg = TAILQ_FIRST(&pgl);
-       KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
-       TAILQ_REMOVE(&pgl, pg, pageq);
-
-#ifdef __HAVE_PMAP_DIRECT
-       va = pmap_map_direct(pg);
-       if (__predict_false(va == 0))
-               uvm_pagefree(pg);
-
-#else  /* !__HAVE_PMAP_DIRECT */
-       mtx_enter(&uvm_km_pages.mtx);
-       while (uvm_km_pages.free == 0) {
-               if (flags & UVM_KMF_NOWAIT) {
-                       mtx_leave(&uvm_km_pages.mtx);
-                       uvm_pagefree(pg);
-                       return NULL;
-               }
-               msleep(&uvm_km_pages.free, &uvm_km_pages.mtx, PVM, "getpage",
-                   0);
-       }
-
-       va = uvm_km_pages.page[--uvm_km_pages.free];
-       if (uvm_km_pages.free < uvm_km_pages.lowat &&
-           curproc != uvm_km_pages.km_proc) {
-               *slowdown = 1;
-               wakeup(&uvm_km_pages.km_proc);
-       }
-       mtx_leave(&uvm_km_pages.mtx);
-
-
-       atomic_setbits_int(&pg->pg_flags, PG_FAKE);
-       UVM_PAGE_OWN(pg, NULL);
-
-       pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
-       pmap_update(kernel_map->pmap);
-
-#endif /* !__HAVE_PMAP_DIRECT */
-       return ((void *)va);
-}
-
-void
-uvm_km_putpage(void *v)
-{
-#ifdef __HAVE_PMAP_DIRECT
-       vaddr_t va = (vaddr_t)v;
-       struct vm_page *pg;
-
-       pg = pmap_unmap_direct(va);
-
-       uvm_pagefree(pg);
-#else  /* !__HAVE_PMAP_DIRECT */
-       struct uvm_km_free_page *fp = v;
-
-       mtx_enter(&uvm_km_pages.mtx);
-       fp->next = uvm_km_pages.freelist;
-       uvm_km_pages.freelist = fp;
-       if (uvm_km_pages.freelistlen++ > 16)
-               wakeup(&uvm_km_pages.km_proc);
-       mtx_leave(&uvm_km_pages.mtx);
-#endif /* !__HAVE_PMAP_DIRECT */
-}
-
-#ifndef __HAVE_PMAP_DIRECT
 struct uvm_km_free_page *
 uvm_km_doputpage(struct uvm_km_free_page *fp)
 {
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.131
diff -u -r1.131 uvm_map.c
--- uvm/uvm_map.c       24 Dec 2010 21:49:04 -0000      1.131
+++ uvm/uvm_map.c       4 Apr 2011 21:31:04 -0000
@@ -396,7 +396,7 @@
 {
        struct vm_map_entry *me, *ne;
        int s, i;
-       int slowdown, pool_flags;
+       int pool_flags;
        UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
        pool_flags = PR_WAITOK;
@@ -408,7 +408,8 @@
                simple_lock(&uvm.kentry_lock);
                me = uvm.kentry_free;
                if (me == NULL) {
-                       ne = uvm_km_getpage(0, &slowdown);
+                       ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty,
+                           &kd_nowait);
                        if (ne == NULL)
                                panic("uvm_mapent_alloc: cannot allocate map "
                                    "entry");
