Upon a pool free request from the application, the Octeon FPA free path
does the following (sketched below):
- uses mbox to reset the fpapf pool setup.
- frees the fpavf resources.
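
From the application's point of view, the free path is reached through the
generic mempool API: rte_mempool_free() dispatches to the registered .free
handler, octeontx_fpavf_free(), which in turn calls
octeontx_fpa_bufpool_destroy() on mp->pool_id. A minimal sketch of that flow
follows; the pool name, object count/size and error handling are illustrative
only, and actually using the pool assumes the remaining ops (enqueue/dequeue,
get_count) are wired up by the rest of this series.

    #include <rte_mempool.h>
    #include <rte_lcore.h>

    /* Illustrative helper: create an octeontx-backed pool, then free it. */
    static int
    example_pool_lifecycle(void)
    {
            struct rte_mempool *mp;

            /* Reserve an empty mempool shell (sizes are examples only). */
            mp = rte_mempool_create_empty("ex_pool", 8192, 2048,
                                          256, 0, rte_socket_id(), 0);
            if (mp == NULL)
                    return -1;

            /* Bind the octeontx fpavf ops before populating the pool. */
            if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) < 0 ||
                rte_mempool_populate_default(mp) < 0) {
                    rte_mempool_free(mp);
                    return -1;
            }

            /* ... pool in use ... */

            /* rte_mempool_free() invokes the registered .free handler,
             * octeontx_fpavf_free(), which destroys the HW pool via
             * octeontx_fpa_bufpool_destroy(mp->pool_id, mp->socket_id).
             */
            rte_mempool_free(mp);
            return 0;
    }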

Signed-off-by: Santosh Shukla <santosh.shu...@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c       | 107 ++++++++++++++++++++++++
 drivers/mempool/octeontx/octeontx_fpavf.h       |   2 +
 drivers/mempool/octeontx/rte_mempool_octeontx.c |  12 ++-
 3 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 85ddf0a03..bcbbefd7d 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -582,6 +582,113 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
        return (uintptr_t)NULL;
 }
 
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+       void **node, **curr, *head = NULL;
+       uint64_t sz;
+       uint64_t cnt, avail;
+       unsigned int gpool;
+       int ret;
+
+       RTE_SET_USED(node_id);
+
+       /* Wait for all outstanding writes to be committed */
+       rte_smp_wmb();
+
+       if (unlikely(!octeontx_fpa_handle_valid(handle)))
+               return -EINVAL;
+
+       /* get pool */
+       gpool = octeontx_fpa_handle2gpool(handle);
+
+       /* Check for no outstanding buffers */
+       cnt = fpavf_read64((void *)((uintptr_t)handle +
+                                       FPA_VF_VHAURA_CNT(gpool)));
+       if (cnt) {
+               fpavf_log_dbg("buffers exist in pool, cnt %ld\n", cnt);
+               return -EBUSY;
+       }
+
+       rte_spinlock_lock(&fpadev.lock);
+
+       avail = fpavf_read64((void *)((uintptr_t)handle +
+                               FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+       /* Prepare to empty the entire POOL */
+       fpavf_write64(avail, (void *)((uintptr_t)handle +
+                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+       fpavf_write64(avail + 1, (void *)((uintptr_t)handle +
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+       /* Empty the pool */
+       /* Invalidate the POOL */
+       octeontx_gpool_free(gpool);
+
+       /* Process all buffers in the pool */
+       while (avail--) {
+
+               /* Yank a buffer from the pool */
+               node = (void *)(uintptr_t)
+                       fpavf_read64((void *)
+                                    (handle + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+
+               if (node == NULL) {
+                       fpavf_log_err("ERROR: GAURA[%u] missing %lu buffers\n",
+                                     gpool, avail);
+                       break;
+               }
+
+               /* Insert it into an ordered linked list */
+               for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+                       if ((uintptr_t)node <= (uintptr_t)curr[0])
+                               break;
+               }
+               node[0] = curr[0];
+               curr[0] = node;
+       }
+
+       /* Verify that the linked list forms a contiguous series */
+       sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+       for (curr = head; curr != NULL && curr[0] != NULL;
+               curr = curr[0]) {
+               if (curr == curr[0] ||
+                       (curr != ((void *)((uintptr_t)curr[0] - sz)))) {
+                       fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+                                     gpool, curr, curr[0]);
+               }
+       }
+
+       /* Disable pool operation */
+       fpavf_write64(~0ul, (void *)((uintptr_t)handle +
+                        FPA_VF_VHPOOL_START_ADDR(gpool)));
+       fpavf_write64(~0ul, (void *)((uintptr_t)handle +
+                       FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+       (void)octeontx_fpapf_pool_destroy(gpool);
+
+       /* Deactivate the AURA */
+       fpavf_write64(0, (void *)((uintptr_t)handle +
+                       FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+       fpavf_write64(0, (void *)((uintptr_t)handle +
+                       FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+       ret = octeontx_fpapf_aura_detach(gpool);
+       if (ret) {
+               fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
+                             gpool, ret);
+       }
+
+       /* Free VF */
+       (void)octeontx_fpavf_free(gpool);
+
+       rte_spinlock_unlock(&fpadev.lock);
+       return 0;
+}
+
 static void
 octeontx_fpavf_setup(void)
 {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 3e8a2682f..936276715 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -135,5 +135,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
                                unsigned int buf_offset, char **va_start,
                                int node);
 int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
 octeontx_fpa_bufpool_block_size(uintptr_t handle);
 #endif /* __OCTEONTX_FPAVF_H__ */
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index 73648aa7f..6754a78c0 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -74,10 +74,20 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
        return rc;
 }
 
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+       uintptr_t pool;
+
+       pool = (uintptr_t)mp->pool_id;
+
+       octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
 static struct rte_mempool_ops octeontx_fpavf_ops = {
        .name = "octeontx_fpavf",
        .alloc = octeontx_fpavf_alloc,
-       .free = NULL,
+       .free = octeontx_fpavf_free,
        .enqueue = NULL,
        .dequeue = NULL,
        .get_count = NULL,
-- 
2.11.0