This patch adds API support to create and free a DMA adapter.

Signed-off-by: Amit Prakash Shukla <amitpraka...@marvell.com>
---
 config/rte_config.h                  |   1 +
 lib/eventdev/meson.build             |   1 +
 lib/eventdev/rte_event_dma_adapter.c | 335 +++++++++++++++++++++++++++
 3 files changed, 337 insertions(+)
 create mode 100644 lib/eventdev/rte_event_dma_adapter.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 400e44e3cf..401727703f 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -77,6 +77,7 @@
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
 #define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32
 #define RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE 32
+#define RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE 32
 
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
diff --git a/lib/eventdev/meson.build b/lib/eventdev/meson.build
index b46bbbc9aa..250abcb154 100644
--- a/lib/eventdev/meson.build
+++ b/lib/eventdev/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'eventdev_private.c',
         'eventdev_trace_points.c',
         'rte_event_crypto_adapter.c',
+        'rte_event_dma_adapter.c',
         'rte_event_eth_rx_adapter.c',
         'rte_event_eth_tx_adapter.c',
         'rte_event_ring.c',
diff --git a/lib/eventdev/rte_event_dma_adapter.c 
b/lib/eventdev/rte_event_dma_adapter.c
new file mode 100644
index 0000000000..c7ffba1b47
--- /dev/null
+++ b/lib/eventdev/rte_event_dma_adapter.c
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Marvell.
+ */
+
+#include "rte_eventdev.h"
+#include "eventdev_pmd.h"
+#include "rte_event_dma_adapter.h"
+
+#define DMA_BATCH_SIZE 32
+#define DMA_DEFAULT_MAX_NB 128
+#define DMA_ADAPTER_NAME_LEN 32
+#define DMA_ADAPTER_BUFFER_SIZE 1024
+
+#define DMA_ADAPTER_OPS_BUFFER_SIZE (DMA_BATCH_SIZE + DMA_BATCH_SIZE)
+
+#define DMA_ADAPTER_ARRAY "event_dma_adapter_array"
+
+/* Validate an adapter id; log and return 'retval' from the caller on failure.
+ * Note: RTE_EDEV_LOG_ERR already appends a newline, so the format string
+ * must not carry its own '\n'.
+ */
+#define EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
+       do { \
+               if (!edma_adapter_valid_id(id)) { \
+                       RTE_EDEV_LOG_ERR("Invalid DMA adapter id = %d", id); \
+                       return retval; \
+               } \
+       } while (0)
+
+/* DMA ops circular buffer: FIFO of pending rte_event_dma_adapter_op pointers. */
+struct dma_ops_circular_buffer {
+       /* Index of head element */
+       uint16_t head;
+
+       /* Index of tail element */
+       uint16_t tail;
+
+       /* Number of elements currently in buffer */
+       uint16_t count;
+
+       /* Capacity of circular buffer (number of op pointers allocated) */
+       uint16_t size;
+
+       /* Pointer to hold rte_event_dma_adapter_op for processing */
+       struct rte_event_dma_adapter_op **op_buffer;
+} __rte_cache_aligned;
+
+/* Per-DMA-device state tracked by the adapter (one entry per dmadev). */
+struct dma_device_info {
+       /* Number of vchans configured for a DMA device. */
+       uint16_t num_dma_dev_vchan;
+} __rte_cache_aligned;
+
+/* Per-adapter instance state; stored in the event_dma_adapter[] memzone array. */
+struct event_dma_adapter {
+       /* Event device identifier */
+       uint8_t eventdev_id;
+
+       /* Event port identifier */
+       uint8_t event_port_id;
+
+       /* Adapter mode */
+       enum rte_event_dma_adapter_mode mode;
+
+       /* Memory allocation name */
+       char mem_name[DMA_ADAPTER_NAME_LEN];
+
+       /* Socket identifier cached from eventdev */
+       int socket_id;
+
+       /* Lock to serialize config updates with service function */
+       rte_spinlock_t lock;
+
+       /* DMA device structure array, indexed by dmadev id */
+       struct dma_device_info *dma_devs;
+
+       /* Circular buffer for processing DMA ops to eventdev */
+       struct dma_ops_circular_buffer ebuf;
+
+       /* Configuration callback for rte_service configuration */
+       rte_event_dma_adapter_conf_cb conf_cb;
+
+       /* Configuration callback argument */
+       void *conf_arg;
+
+       /* Set if the default config callback is being used */
+       int default_cb_arg;
+} __rte_cache_aligned;
+
+static struct event_dma_adapter **event_dma_adapter;
+
+/* Return non-zero when 'id' falls inside the supported adapter-id range. */
+static inline int
+edma_adapter_valid_id(uint8_t id)
+{
+       return (id < RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE) ? 1 : 0;
+}
+
+/* Translate an adapter id to its instance, or NULL before edma_array_init(). */
+static inline struct event_dma_adapter *
+edma_id_to_adapter(uint8_t id)
+{
+       if (event_dma_adapter == NULL)
+               return NULL;
+
+       return event_dma_adapter[id];
+}
+
+/* Lazily create (or attach to) the shared memzone that holds the adapter
+ * pointer array. Returns 0 on success, -rte_errno on reservation failure.
+ */
+static int
+edma_array_init(void)
+{
+       const struct rte_memzone *mz;
+       uint32_t sz;
+
+       /* Lookup first so secondary callers attach to an existing zone. */
+       mz = rte_memzone_lookup(DMA_ADAPTER_ARRAY);
+       if (mz == NULL) {
+               sz = sizeof(struct event_dma_adapter *) * RTE_EVENT_DMA_ADAPTER_MAX_INSTANCE;
+               sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+               mz = rte_memzone_reserve_aligned(DMA_ADAPTER_ARRAY, sz, rte_socket_id(), 0,
+                                                RTE_CACHE_LINE_SIZE);
+               if (mz == NULL) {
+                       RTE_EDEV_LOG_ERR("Failed to reserve memzone : %s, err = %d",
+                                        DMA_ADAPTER_ARRAY, rte_errno);
+                       return -rte_errno;
+               }
+       }
+
+       event_dma_adapter = mz->addr;
+
+       return 0;
+}
+
+/* Allocate the op pointer array backing a circular buffer of 'sz' entries.
+ * Returns 0 on success, -ENOMEM when allocation fails.
+ */
+static inline int
+edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)
+{
+       buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_event_dma_adapter_op *) * sz, 0);
+       if (buf->op_buffer == NULL)
+               return -ENOMEM;
+
+       buf->size = sz;
+
+       return 0;
+}
+
+/* Release the op pointer array backing a circular buffer. */
+static inline void
+edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)
+{
+       rte_free(buf->op_buffer);
+}
+
+/* Default adapter configuration callback: reconfigure the eventdev with one
+ * extra event port for the adapter's use. 'arg' is the rte_event_port_conf
+ * heap copy made by rte_event_dma_adapter_create(). If the eventdev was
+ * running it is stopped for reconfiguration and restarted on every exit path.
+ */
+static int
+edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf,
+                      void *arg)
+{
+       struct rte_event_port_conf *port_conf;
+       struct rte_event_dev_config dev_conf;
+       struct event_dma_adapter *adapter;
+       struct rte_eventdev *dev;
+       uint8_t port_id;
+       int started;
+       int ret;
+
+       adapter = edma_id_to_adapter(id);
+       if (adapter == NULL)
+               return -EINVAL;
+
+       dev = &rte_eventdevs[adapter->eventdev_id];
+       dev_conf = dev->data->dev_conf;
+
+       /* The eventdev cannot be reconfigured while running. */
+       started = dev->data->dev_started;
+       if (started)
+               rte_event_dev_stop(evdev_id);
+
+       /* Claim the next port index for the adapter. */
+       port_id = dev_conf.nb_event_ports;
+       dev_conf.nb_event_ports += 1;
+
+       port_conf = arg;
+       if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
+               dev_conf.nb_single_link_event_port_queues += 1;
+
+       ret = rte_event_dev_configure(evdev_id, &dev_conf);
+       if (ret) {
+               RTE_EDEV_LOG_ERR("Failed to configure event dev %u", evdev_id);
+               if (started) {
+                       if (rte_event_dev_start(evdev_id))
+                               return -EIO;
+               }
+               return ret;
+       }
+
+       ret = rte_event_port_setup(evdev_id, port_id, port_conf);
+       if (ret) {
+               RTE_EDEV_LOG_ERR("Failed to setup event port %u", port_id);
+               /* Restore the device run state, as on the configure path. */
+               if (started) {
+                       if (rte_event_dev_start(evdev_id))
+                               return -EIO;
+               }
+               return ret;
+       }
+
+       conf->event_port_id = port_id;
+       conf->max_nb = DMA_DEFAULT_MAX_NB;
+       if (started)
+               ret = rte_event_dev_start(evdev_id);
+
+       adapter->default_cb_arg = 1;
+       adapter->event_port_id = conf->event_port_id;
+
+       return ret;
+}
+
+/* Create a DMA adapter instance with a caller-supplied configuration
+ * callback. Returns 0 on success; -EINVAL for bad id/evdev/callback,
+ * -EEXIST when the id is taken, -ENOMEM or a propagated error otherwise.
+ * Ownership of 'conf_arg' passes to the adapter (freed on adapter free).
+ */
+int
+rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
+                                rte_event_dma_adapter_conf_cb conf_cb,
+                                enum rte_event_dma_adapter_mode mode, void *conf_arg)
+{
+       struct rte_event_dev_info dev_info;
+       struct event_dma_adapter *adapter;
+       char name[DMA_ADAPTER_NAME_LEN];
+       struct rte_dma_info info;
+       uint16_t num_dma_dev;
+       int socket_id;
+       uint8_t i;
+       int ret;
+
+       EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(evdev_id, -EINVAL);
+
+       if (conf_cb == NULL)
+               return -EINVAL;
+
+       if (event_dma_adapter == NULL) {
+               ret = edma_array_init();
+               if (ret)
+                       return ret;
+       }
+
+       adapter = edma_id_to_adapter(id);
+       if (adapter != NULL) {
+               RTE_EDEV_LOG_ERR("DMA adapter ID %d already exists!", id);
+               return -EEXIST;
+       }
+
+       socket_id = rte_event_dev_socket_id(evdev_id);
+       snprintf(name, DMA_ADAPTER_NAME_LEN, "rte_event_dma_adapter_%d", id);
+       adapter = rte_zmalloc_socket(name, sizeof(struct event_dma_adapter), RTE_CACHE_LINE_SIZE,
+                                    socket_id);
+       if (adapter == NULL) {
+               RTE_EDEV_LOG_ERR("Failed to get mem for event DMA adapter!");
+               return -ENOMEM;
+       }
+
+       if (edma_circular_buffer_init("edma_circular_buffer", &adapter->ebuf,
+                                     DMA_ADAPTER_BUFFER_SIZE)) {
+               RTE_EDEV_LOG_ERR("Failed to get memory for event adapter circular buffer");
+               rte_free(adapter);
+               return -ENOMEM;
+       }
+
+       ret = rte_event_dev_info_get(evdev_id, &dev_info);
+       if (ret < 0) {
+               RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s", evdev_id,
+                                dev_info.driver_name);
+               edma_circular_buffer_free(&adapter->ebuf);
+               rte_free(adapter);
+               return ret;
+       }
+
+       num_dma_dev = rte_dma_count_avail();
+
+       adapter->eventdev_id = evdev_id;
+       adapter->mode = mode;
+       strcpy(adapter->mem_name, name);
+       adapter->socket_id = socket_id;
+       adapter->conf_cb = conf_cb;
+       adapter->conf_arg = conf_arg;
+       adapter->dma_devs = rte_zmalloc_socket(adapter->mem_name,
+                                              num_dma_dev * sizeof(struct dma_device_info), 0,
+                                              socket_id);
+       if (adapter->dma_devs == NULL) {
+               RTE_EDEV_LOG_ERR("Failed to get memory for DMA devices");
+               edma_circular_buffer_free(&adapter->ebuf);
+               rte_free(adapter);
+               return -ENOMEM;
+       }
+
+       rte_spinlock_init(&adapter->lock);
+       for (i = 0; i < num_dma_dev; i++) {
+               ret = rte_dma_info_get(i, &info);
+               if (ret) {
+                       RTE_EDEV_LOG_ERR("Failed to get dma device info");
+                       /* Unwind everything allocated above, dma_devs included. */
+                       rte_free(adapter->dma_devs);
+                       edma_circular_buffer_free(&adapter->ebuf);
+                       rte_free(adapter);
+                       return ret;
+               }
+
+               adapter->dma_devs[i].num_dma_dev_vchan = info.nb_vchans;
+       }
+
+       event_dma_adapter[id] = adapter;
+
+       return 0;
+}
+
+/* Create a DMA adapter using the default configuration callback. A heap
+ * copy of 'port_config' is handed to create_ext as conf_arg; it is freed
+ * here on failure, otherwise owned (and later freed) by the adapter.
+ */
+int
+rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id, struct rte_event_port_conf *port_config,
+                           enum rte_event_dma_adapter_mode mode)
+{
+       struct rte_event_port_conf *pc;
+       int ret;
+
+       if (port_config == NULL)
+               return -EINVAL;
+
+       EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+       pc = rte_malloc(NULL, sizeof(struct rte_event_port_conf), 0);
+       if (pc == NULL)
+               return -ENOMEM;
+
+       rte_memcpy(pc, port_config, sizeof(struct rte_event_port_conf));
+       ret = rte_event_dma_adapter_create_ext(id, evdev_id, edma_default_config_cb, mode, pc);
+       if (ret != 0)
+               rte_free(pc);
+
+       return ret;
+}
+
+/* Destroy an adapter instance and release all memory it owns.
+ * NOTE(review): conf_arg is freed unconditionally — for create_ext callers
+ * this assumes conf_arg was heap-allocated with rte_malloc; confirm the
+ * documented ownership contract matches.
+ */
+int
+rte_event_dma_adapter_free(uint8_t id)
+{
+       struct event_dma_adapter *adapter;
+
+       EVENT_DMA_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+       adapter = edma_id_to_adapter(id);
+       if (adapter == NULL)
+               return -EINVAL;
+
+       rte_free(adapter->conf_arg);
+       rte_free(adapter->dma_devs);
+       edma_circular_buffer_free(&adapter->ebuf);
+       rte_free(adapter);
+       event_dma_adapter[id] = NULL;
+
+       return 0;
+}
-- 
2.25.1

Reply via email to