The skeleton driver facilitates bootstrapping new eventdev drivers and provides a platform to verify the northbound eventdev common code. The driver supports both VDEV- and PCI-based eventdev devices.
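For illustration, once the eventdev library from this series and this PMD are built, the driver can be exercised as a virtual device from any EAL application. The sketch below is not part of this patch; the "event_skeleton" vdev name and the rte_event_dev_get_dev_id()/rte_event_dev_info_get() calls are assumed from the eventdev API introduced earlier in this series:

    ./your_eal_app -c 0x3 --vdev="event_skeleton"

    /* in the application, after rte_eal_init() */
    struct rte_event_dev_info info;
    int dev_id;

    dev_id = rte_event_dev_get_dev_id("event_skeleton");
    if (dev_id < 0)
            rte_panic("event_skeleton device not found\n");

    /* reports the placeholder limits set in skeleton_eventdev_info_get() */
    rte_event_dev_info_get(dev_id, &info);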
Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
---
 MAINTAINERS                                        |   1 +
 config/common_base                                 |   8 +
 drivers/Makefile                                   |   1 +
 drivers/event/Makefile                             |  36 ++
 drivers/event/skeleton/Makefile                    |  55 +++
 .../skeleton/rte_pmd_skeleton_event_version.map    |   4 +
 drivers/event/skeleton/skeleton_eventdev.c         | 535 +++++++++++++++++++++
 drivers/event/skeleton/skeleton_eventdev.h         |  72 +++
 mk/rte.app.mk                                      |   4 +
 9 files changed, 716 insertions(+)
 create mode 100644 drivers/event/Makefile
 create mode 100644 drivers/event/skeleton/Makefile
 create mode 100644 drivers/event/skeleton/rte_pmd_skeleton_event_version.map
 create mode 100644 drivers/event/skeleton/skeleton_eventdev.c
 create mode 100644 drivers/event/skeleton/skeleton_eventdev.h

diff --git a/MAINTAINERS b/MAINTAINERS
index e430ca7..c594a23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -252,6 +252,7 @@ F: examples/l2fwd-crypto/
 Eventdev API - EXPERIMENTAL
 M: Jerin Jacob <jerin.jacob at caviumnetworks.com>
 F: lib/librte_eventdev/
+F: drivers/event/skeleton/
 
 Networking Drivers
 ------------------
diff --git a/config/common_base b/config/common_base
index 7a8814e..35aef0a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -417,6 +417,14 @@ CONFIG_RTE_LIBRTE_EVENTDEV=y
 CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
 CONFIG_RTE_EVENT_MAX_DEVS=16
 CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
+
+#
+# Compile PMD for skeleton event device
+#
+CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n
+
+#
 # Compile librte_ring
 #
 CONFIG_RTE_LIBRTE_RING=y
diff --git a/drivers/Makefile b/drivers/Makefile
index 81c03a8..40b8347 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -33,5 +33,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-y += net
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
new file mode 100644
index 0000000..678279f
--- /dev/null
+++ b/drivers/event/Makefile
@@ -0,0 +1,36 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Cavium networks nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
new file mode 100644
index 0000000..e557f6d
--- /dev/null
+++ b/drivers/event/skeleton/Makefile
@@ -0,0 +1,55 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Cavium Networks nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_skeleton_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton_eventdev.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += lib/librte_event
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/skeleton/rte_pmd_skeleton_event_version.map b/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
new file mode 100644
index 0000000..31eca32
--- /dev/null
+++ b/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+	local: *;
+};
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
new file mode 100644
index 0000000..da9f444
--- /dev/null
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -0,0 +1,535 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_lcore.h>
+#include <rte_vdev.h>
+
+#include "skeleton_eventdev.h"
+
+static int
+skeleton_eventdev_enqueue(void *port, struct rte_event *ev)
+{
+	struct skeleton_port *sp = port;
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(port);
+
+	return -ENOTSUP;
+}
+
+static uint16_t
+skeleton_eventdev_enqueue_burst(void *port, struct rte_event ev[],
+		uint16_t nb_events)
+{
+	struct skeleton_port *sp = port;
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(port);
+	RTE_SET_USED(nb_events);
+
+	return 0;
+}
+
+static bool
+skeleton_eventdev_dequeue(void *port, struct rte_event *ev, uint64_t wait)
+{
+	struct skeleton_port *sp = port;
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(wait);
+
+	return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t wait)
+{
+	struct skeleton_port *sp = port;
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(ev);
+	RTE_SET_USED(nb_events);
+	RTE_SET_USED(wait);
+
+	return 0;
+}
+
+static void
+skeleton_eventdev_info_get(struct rte_eventdev *dev,
+		struct rte_event_dev_info *dev_info)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+
+	dev_info->min_dequeue_wait_ns = 1;
+	dev_info->max_dequeue_wait_ns = 10000;
+	dev_info->dequeue_wait_ns = 25;
+	dev_info->max_event_queues = 64;
+	dev_info->max_event_queue_flows = (1ULL << 20);
+	dev_info->max_event_queue_priority_levels = 8;
+	dev_info->max_event_priority_levels = 8;
+	dev_info->max_event_ports = 32;
+	dev_info->max_event_port_dequeue_depth = 16;
+	dev_info->max_event_port_enqueue_depth = 16;
+	dev_info->max_num_events = (1ULL << 20);
+	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+					RTE_EVENT_DEV_CAP_EVENT_QOS;
+}
+
+static int
+skeleton_eventdev_configure(struct rte_eventdev *dev)
+{
+	struct rte_eventdev_data *data = dev->data;
+	struct rte_event_dev_config *conf = &data->dev_conf;
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(conf);
+	RTE_SET_USED(skel);
+
+	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+	return 0;
+}
+
+static int
+skeleton_eventdev_start(struct rte_eventdev *dev)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+
+	return 0;
+}
+
+static void
+skeleton_eventdev_stop(struct rte_eventdev *dev)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+}
+
+static int
+skeleton_eventdev_close(struct rte_eventdev *dev)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+
+	return 0;
+}
+
+static void
+skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+		struct rte_event_queue_conf *queue_conf)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	RTE_SET_USED(queue_id);
+
+	queue_conf->nb_atomic_flows = (1ULL << 20);
+	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
+	queue_conf->priority = RTE_EVENT_QUEUE_PRIORITY_NORMAL;
+}
+
+static void
+skeleton_eventdev_queue_release(void *queue)
+{
+	struct skeleton_queue *sq = queue;
+	PMD_DRV_FUNC_TRACE();
+
+	rte_free(sq);
+}
+
+static int
+skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+		struct rte_event_queue_conf *queue_conf)
+{
+	struct skeleton_queue *sq;
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	RTE_SET_USED(queue_conf);
+
+	/* Free memory prior to re-allocation if needed */
+	if (dev->data->queues[queue_id] != NULL) {
+		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+				queue_id);
+		skeleton_eventdev_queue_release(dev->data->queues[queue_id]);
+		dev->data->queues[queue_id] = NULL;
+	}
+
+	/* Allocate event queue memory */
+	sq = rte_zmalloc_socket("eventdev queue",
+			sizeof(struct skeleton_queue), RTE_CACHE_LINE_SIZE,
+			dev->data->socket_id);
+	if (sq == NULL) {
+		PMD_DRV_ERR("Failed to allocate sq queue_id=%d", queue_id);
+		return -ENOMEM;
+	}
+
+	sq->queue_id = queue_id;
+
+	PMD_DRV_LOG(DEBUG, "[%d] sq=%p", queue_id, sq);
+
+	dev->data->queues[queue_id] = sq;
+	return 0;
+}
+
+static void
+skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+		struct rte_event_port_conf *port_conf)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	RTE_SET_USED(port_id);
+
+	port_conf->new_event_threshold = 32 * 1024;
+	port_conf->dequeue_depth = 16;
+	port_conf->enqueue_depth = 16;
+}
+
+static void
+skeleton_eventdev_port_release(void *port)
+{
+	struct skeleton_port *sp = port;
+	PMD_DRV_FUNC_TRACE();
+
+	rte_free(sp);
+}
+
+static int
+skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+		struct rte_event_port_conf *port_conf)
+{
+	struct skeleton_port *sp;
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	RTE_SET_USED(port_conf);
+
+	/* Free memory prior to re-allocation if needed */
+	if (dev->data->ports[port_id] != NULL) {
+		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+				port_id);
+		skeleton_eventdev_port_release(dev->data->ports[port_id]);
+		dev->data->ports[port_id] = NULL;
+	}
+
+	/* Allocate event port memory */
+	sp = rte_zmalloc_socket("eventdev port",
+			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
+			dev->data->socket_id);
+	if (sp == NULL) {
+		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
+		return -ENOMEM;
+	}
+
+	sp->port_id = port_id;
+
+	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
+
+	dev->data->ports[port_id] = sp;
+	return 0;
+}
+
+static int
+skeleton_eventdev_port_link(void *port, struct rte_event_queue_link link[],
+		uint16_t nb_links)
+{
+	struct skeleton_port *sp = port;
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(link);
+
+	/* Linked all the queues */
+	return (int)nb_links;
+}
+
+static int
+skeleton_eventdev_port_unlink(void *port, uint8_t queues[],
+		uint16_t nb_unlinks)
+{
+	struct skeleton_port *sp = port;
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(sp);
+	RTE_SET_USED(queues);
+
+	/* Unlinked all the queues */
+	return (int)nb_unlinks;
+
+}
+
+static void
+skeleton_eventdev_wait_time(struct rte_eventdev *dev, uint64_t ns,
+		uint64_t *wait_ticks)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+	uint32_t scale = 1;
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	*wait_ticks = ns * scale;
+}
+
+static void
+skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+	PMD_DRV_FUNC_TRACE();
+
+	RTE_SET_USED(skel);
+	RTE_SET_USED(f);
+}
+
+
+/* Initialize and register event driver with DPDK Application */
+static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+	.dev_infos_get = skeleton_eventdev_info_get,
+	.dev_configure = skeleton_eventdev_configure,
+	.dev_start = skeleton_eventdev_start,
+	.dev_stop = skeleton_eventdev_stop,
+	.dev_close = skeleton_eventdev_close,
+	.queue_def_conf = skeleton_eventdev_queue_def_conf,
+	.queue_setup = skeleton_eventdev_queue_setup,
+	.queue_release = skeleton_eventdev_queue_release,
+	.port_def_conf = skeleton_eventdev_port_def_conf,
+	.port_setup = skeleton_eventdev_port_setup,
+	.port_release = skeleton_eventdev_port_release,
+	.port_link = skeleton_eventdev_port_link,
+	.port_unlink = skeleton_eventdev_port_unlink,
+	.wait_time = skeleton_eventdev_wait_time,
+	.dump = skeleton_eventdev_dump
+};
+
+static int
+skeleton_eventdev_init(struct rte_eventdev *eventdev)
+{
+	struct rte_pci_device *pci_dev;
+	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
+	int ret = 0;
+
+	PMD_DRV_FUNC_TRACE();
+
+	eventdev->dev_ops = &skeleton_eventdev_ops;
+	eventdev->schedule = NULL;
+	eventdev->enqueue = skeleton_eventdev_enqueue;
+	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+	eventdev->dequeue = skeleton_eventdev_dequeue;
+	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	pci_dev = eventdev->pci_dev;
+
+	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+	if (!skel->reg_base) {
+		PMD_DRV_ERR("Failed to map BAR0");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	skel->device_id = pci_dev->id.device_id;
+	skel->vendor_id = pci_dev->id.vendor_id;
+	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
+	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
+			pci_dev->id.vendor_id, pci_dev->id.device_id,
+			pci_dev->addr.domain, pci_dev->addr.bus,
+			pci_dev->addr.devid, pci_dev->addr.function);
+
+	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
+		eventdev->data->dev_id, eventdev->data->socket_id,
+		skel->vendor_id, skel->device_id);
+
+fail:
+	return ret;
+}
+
+/* PCI based event device */
+
+#define EVENTDEV_SKEL_VENDOR_ID 0x177d
+#define EVENTDEV_SKEL_PRODUCT_ID 0x0001
+
+static const struct rte_pci_id pci_id_skeleton_map[] = {
+	{
+		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
+			       EVENTDEV_SKEL_PRODUCT_ID)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
+	.pci_drv = {
+		.id_table = pci_id_skeleton_map,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.probe = rte_eventdev_pmd_pci_probe,
+		.remove = rte_eventdev_pmd_pci_remove,
+	},
+	.eventdev_init = skeleton_eventdev_init,
+	.dev_private_size = sizeof(struct skeleton_eventdev),
+};
+
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
+
+/* VDEV based event device */
+
+/**
+ * Global static parameter used to create a unique name for each skeleton
+ * event device.
+ */
+static unsigned int skeleton_unique_id;
+
+static inline int
+skeleton_create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", RTE_STR(EVENTDEV_NAME_SKELETON_PMD),
+			skeleton_unique_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int
+skeleton_eventdev_create(int socket_id)
+{
+	struct rte_eventdev *eventdev;
+	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+	/* Create a unique device name */
+	if (skeleton_create_unique_device_name(eventdev_name,
+			RTE_EVENTDEV_NAME_MAX_LEN) != 0) {
+		PMD_DRV_ERR("Failed to create unique eventdev name");
+		return -EINVAL;
+	}
+
+	eventdev = rte_eventdev_pmd_vdev_init(eventdev_name,
+			sizeof(struct skeleton_eventdev), socket_id);
+	if (eventdev == NULL) {
+		PMD_DRV_ERR("Failed to create eventdev vdev");
+		goto fail;
+	}
+
+	eventdev->dev_ops = &skeleton_eventdev_ops;
+	eventdev->schedule = NULL;
+	eventdev->enqueue = skeleton_eventdev_enqueue;
+	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+	eventdev->dequeue = skeleton_eventdev_dequeue;
+	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+	return 0;
+fail:
+	return -EFAULT;
+}
+
+static int
+skeleton_eventdev_probe(const char *name, __rte_unused const char *input_args)
+{
+	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d", name,
+			rte_socket_id());
+	return skeleton_eventdev_create(rte_socket_id());
+}
+
+static int
+skeleton_eventdev_remove(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
+	.probe = skeleton_eventdev_probe,
+	.remove = skeleton_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
new file mode 100644
index 0000000..872ba01
--- /dev/null
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -0,0 +1,72 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SKELETON_EVENTDEV_H__
+#define __SKELETON_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+
+#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+	RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+struct skeleton_eventdev {
+	uintptr_t reg_base;
+	uint16_t device_id;
+	uint16_t vendor_id;
+	uint16_t subsystem_device_id;
+	uint16_t subsystem_vendor_id;
+} __rte_cache_aligned;
+
+struct skeleton_queue {
+	uint8_t queue_id;
+} __rte_cache_aligned;
+
+struct skeleton_port {
+	uint8_t port_id;
+} __rte_cache_aligned;
+
+static inline struct skeleton_eventdev *
+skeleton_pmd_priv(struct rte_eventdev *eventdev)
+{
+	return eventdev->data->dev_private;
+}
+
+#endif /* __SKELETON_EVENTDEV_H__ */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 716725a..8341c13 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -148,6 +148,10 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -lrte_pmd_zuc
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
+ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
+endif # CONFIG_RTE_LIBRTE_EVENTDEV
+
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
 
 _LDLIBS-y += --no-whole-archive
-- 
2.5.5