Author: np
Date: Fri May 27 21:26:26 2016
New Revision: 300875
URL: https://svnweb.freebsd.org/changeset/base/300875

Log:
  iw_cxgbe: Use vmem(9) to manage PBL and RQT allocations.
  
  Submitted by: Krishnamraju Eraparaju at Chelsio
  Reviewed by:  Steve Wise
  Sponsored by: Chelsio Communications
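
  For context, a minimal sketch of the vmem(9) arena lifecycle this change
  switches to, using the same flags as the driver (M_FIRSTFIT | M_NOWAIT,
  1-byte quantum, no quantum cache). The arena name, the base/size constants
  and the example_pool_* wrappers below are placeholders for illustration
  only; the driver builds its arenas over the adapter's PBL and RQT ranges
  (rdev->adap->vres.pbl / vres.rq) instead.

  #include <sys/param.h>
  #include <sys/errno.h>
  #include <sys/malloc.h>
  #include <sys/vmem.h>

  /* Hypothetical device address window, for illustration only. */
  #define EXAMPLE_POOL_BASE	0x10000UL
  #define EXAMPLE_POOL_SIZE	0x40000UL

  static vmem_t *example_arena;

  static int
  example_pool_create(void)
  {
  	/* Byte-granular arena over the device window, no quantum caching. */
  	example_arena = vmem_create("EXAMPLE_MEM_POOL", EXAMPLE_POOL_BASE,
  	    EXAMPLE_POOL_SIZE, 1, 0, M_FIRSTFIT | M_NOWAIT);
  	return (example_arena == NULL ? ENOMEM : 0);
  }

  static int
  example_pool_alloc(size_t size, vmem_addr_t *addrp)
  {
  	/*
  	 * Constrained allocation: 4-byte alignment, no phase or
  	 * boundary-crossing restriction, full address range.
  	 * Returns 0 on success with the address in *addrp.
  	 */
  	return (vmem_xalloc(example_arena, size, 4, 0, 0,
  	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT, addrp));
  }

  static void
  example_pool_free(vmem_addr_t addr, size_t size)
  {
  	/* The size must match the size passed to vmem_xalloc(). */
  	vmem_xfree(example_arena, addr, size);
  }

  static void
  example_pool_destroy(void)
  {
  	vmem_destroy(example_arena);
  }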

Modified:
  head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
  head/sys/dev/cxgbe/iw_cxgbe/resource.c

Modified: head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	Fri May 27 21:12:25 2016	(r300874)
+++ head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	Fri May 27 21:26:26 2016	(r300875)
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <sys/vmem.h>
 
 #include <asm/byteorder.h>
 
@@ -144,8 +145,8 @@ struct c4iw_rdev {
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
-       struct gen_pool *pbl_pool;
-       struct gen_pool *rqt_pool;
+       vmem_t          *rqt_arena;
+       vmem_t          *pbl_arena;
        u32 flags;
        struct c4iw_stats stats;
 };
@@ -929,75 +930,6 @@ extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 extern int c4iw_max_read_depth;
 
-#include <sys/blist.h>
-struct gen_pool {
-        blist_t         gen_list;
-        daddr_t         gen_base;
-        int             gen_chunk_shift;
-        struct mutex      gen_lock;
-};
-
-static __inline struct gen_pool *
-gen_pool_create(daddr_t base, u_int chunk_shift, u_int len)
-{
-        struct gen_pool *gp;
-
-        gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT);
-        if (gp == NULL)
-                return (NULL);
-
-        memset(gp, 0, sizeof(struct gen_pool));
-        gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT);
-        if (gp->gen_list == NULL) {
-                free(gp, M_DEVBUF);
-                return (NULL);
-        }
-        blist_free(gp->gen_list, 0, len >> chunk_shift);
-        gp->gen_base = base;
-        gp->gen_chunk_shift = chunk_shift;
-        //mutex_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF);
-        mutex_init(&gp->gen_lock);
-
-        return (gp);
-}
-
-static __inline unsigned long
-gen_pool_alloc(struct gen_pool *gp, int size)
-{
-        int chunks;
-        daddr_t blkno;
-
-        chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
-        mutex_lock(&gp->gen_lock);
-        blkno = blist_alloc(gp->gen_list, chunks);
-        mutex_unlock(&gp->gen_lock);
-
-        if (blkno == SWAPBLK_NONE)
-                return (0);
-
-        return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
-}
-
-static __inline void
-gen_pool_free(struct gen_pool *gp, daddr_t address, int size)
-{
-        int chunks;
-        daddr_t blkno;
-
-        chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
-        blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
-        mutex_lock(&gp->gen_lock);
-        blist_free(gp->gen_list, blkno, chunks);
-        mutex_unlock(&gp->gen_lock);
-}
-
-static __inline void
-gen_pool_destroy(struct gen_pool *gp)
-{
-        blist_destroy(gp->gen_list);
-        free(gp, M_DEVBUF);
-}
-
 #if defined(__i386__) || defined(__amd64__)
 #define L1_CACHE_BYTES 128
 #else

Modified: head/sys/dev/cxgbe/iw_cxgbe/resource.c
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/resource.c	Fri May 27 21:12:25 2016	(r300874)
+++ head/sys/dev/cxgbe/iw_cxgbe/resource.c	Fri May 27 21:26:26 2016	(r300875)
@@ -248,13 +248,17 @@ void c4iw_destroy_resource(struct c4iw_r
        c4iw_id_table_free(&rscp->pdid_table);
 }
 
-/* PBL Memory Manager.  Uses Linux generic allocator. */
+/* PBL Memory Manager. */
 
-#define MIN_PBL_SHIFT 8                        /* 256B == min PBL size (32 entries) */
+#define MIN_PBL_SHIFT 5                        /* 32B == min PBL size (4 entries) */
 
 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
-       unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
+       unsigned long addr;
+
+       vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+                       4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+                       M_FIRSTFIT|M_NOWAIT, &addr);
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
@@ -273,15 +277,16 @@ void c4iw_pblpool_free(struct c4iw_rdev 
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
-       gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+       vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 {
-       rdev->pbl_pool = gen_pool_create(rdev->adap->vres.pbl.start,
-                                        MIN_PBL_SHIFT,
-                                        rdev->adap->vres.pbl.size);
-       if (!rdev->pbl_pool)
+       rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
+                                       rdev->adap->vres.pbl.start,
+                                       rdev->adap->vres.pbl.size,
+                                       1, 0, M_FIRSTFIT| M_NOWAIT);
+       if (!rdev->pbl_arena)
                return -ENOMEM;
 
        return 0;
@@ -289,16 +294,21 @@ int c4iw_pblpool_create(struct c4iw_rdev
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->pbl_pool);
+       vmem_destroy(rdev->pbl_arena);
 }
 
-/* RQT Memory Manager.  Uses Linux generic allocator. */
+/* RQT Memory Manager. */
 
 #define MIN_RQT_SHIFT 10       /* 1KB == min RQT size (16 entries) */
 
 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
-       unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
+       unsigned long addr;
+
+       vmem_xalloc(rdev->rqt_arena,
+                       roundup((size << 6),(1 << MIN_RQT_SHIFT)),
+                       4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+                       M_FIRSTFIT|M_NOWAIT, &addr);
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
            size << 6);
        if (!addr)
@@ -321,15 +331,17 @@ void c4iw_rqtpool_free(struct c4iw_rdev 
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
-       gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+       vmem_xfree(rdev->rqt_arena, addr,
+                      roundup((size << 6),(1 << MIN_RQT_SHIFT)));
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 {
-       rdev->rqt_pool = gen_pool_create(rdev->adap->vres.rq.start,
-                                        MIN_RQT_SHIFT,
-                                        rdev->adap->vres.rq.size);
-       if (!rdev->rqt_pool)
+       rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
+                                       rdev->adap->vres.rq.start,
+                                       rdev->adap->vres.rq.size,
+                                       1, 0, M_FIRSTFIT| M_NOWAIT);
+       if (!rdev->rqt_arena)
                return -ENOMEM;
 
        return 0;
@@ -337,6 +349,6 @@ int c4iw_rqtpool_create(struct c4iw_rdev
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->rqt_pool);
+       vmem_destroy(rdev->rqt_arena);
 }
 #endif
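
As a standalone illustration of the size rounding done by the pool wrappers
above: PBL requests are rounded up to the 32-byte minimum chunk
(1 << MIN_PBL_SHIFT after this change), while RQT requests are first scaled
to 64 bytes per queue entry (size << 6) and then rounded up to the 1 KB
minimum (1 << MIN_RQT_SHIFT). The small userland harness below only
reproduces that arithmetic; the request values are made up.

#include <stdio.h>
#include <sys/param.h>	/* roundup() */

#define MIN_PBL_SHIFT	5	/* 32B == min PBL size (4 entries) */
#define MIN_RQT_SHIFT	10	/* 1KB == min RQT size (16 entries) */

int
main(void)
{
	int pbl_bytes = 100;	/* hypothetical PBL request, in bytes */
	int rqt_entries = 10;	/* hypothetical RQT request, in entries */

	/* 100 bytes rounds up to 128 (four 32-byte chunks). */
	printf("PBL: %d -> %d bytes\n", pbl_bytes,
	    roundup(pbl_bytes, 1 << MIN_PBL_SHIFT));

	/* 10 entries * 64 bytes = 640 bytes, rounds up to 1024. */
	printf("RQT: %d entries -> %d bytes\n", rqt_entries,
	    roundup(rqt_entries << 6, 1 << MIN_RQT_SHIFT));

	return (0);
}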