Signed-off-by: Sujith Sankar <ssujith at cisco.com>
---
 lib/librte_pmd_enic/enic.h        |  158 +++++
 lib/librte_pmd_enic/enic_clsf.c   |  244 +++++++
 lib/librte_pmd_enic/enic_compat.h |  142 ++++
 lib/librte_pmd_enic/enic_main.c   | 1328 +++++++++++++++++++++++++++++++++++++
 lib/librte_pmd_enic/enic_res.c    |  221 ++++++
 lib/librte_pmd_enic/enic_res.h    |  168 +++++
 6 files changed, 2261 insertions(+)
 create mode 100644 lib/librte_pmd_enic/enic.h
 create mode 100644 lib/librte_pmd_enic/enic_clsf.c
 create mode 100644 lib/librte_pmd_enic/enic_compat.h
 create mode 100644 lib/librte_pmd_enic/enic_main.c
 create mode 100644 lib/librte_pmd_enic/enic_res.c
 create mode 100644 lib/librte_pmd_enic/enic_res.h
diff --git a/lib/librte_pmd_enic/enic.h b/lib/librte_pmd_enic/enic.h
new file mode 100644
index 0000000..b72c048
--- /dev/null
+++ b/lib/librte_pmd_enic/enic.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+
+#define DRV_NAME		"enic_pmd"
+#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
+#define DRV_VERSION		"1.0.0.4"
+#define DRV_COPYRIGHT		"Copyright 2008-2014 Cisco Systems, Inc"
+
+#define ENIC_WQ_MAX		8
+#define ENIC_RQ_MAX		8
+#define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
+#define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
+
+#define VLAN_ETH_HLEN		18
+
+#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+#define ENICPMD_BDF_LENGTH	13   /* 0000:00:00.0'\0' */
+#define PKT_TX_TCP_UDP_CKSUM	0x6000
+#define ENIC_CALC_IP_CKSUM	1
+#define ENIC_CALC_TCP_UDP_CKSUM	2
+#define ENIC_MAX_MTU		9000
+#define PAGE_SIZE		4096
+#define PAGE_ROUND_UP(x) \
+	((((unsigned long)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
+
+#define ENICPMD_VFIO_PATH	"/dev/vfio/vfio"
+/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043  /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF	0x0071  /* enet SRIOV VF */
+
+
+#define ENICPMD_FDIR_MAX	64
+
+struct enic_fdir_node {
+	struct rte_fdir_filter filter;
+	u16 fltr_id;
+	u16 rq_index;
+};
+
+struct enic_fdir {
+	struct rte_eth_fdir stats;
+	struct rte_hash *hash;
+	struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+};
+
+/* Per-instance private data structure */
+struct enic {
+	struct enic *next;
+	struct rte_pci_device *pdev;
+	struct vnic_enet_config config;
+	struct vnic_dev_bar bar0;
+	struct vnic_dev *vdev;
+
+	struct rte_eth_dev *rte_dev;
+	struct enic_fdir fdir;
+	char bdf_name[ENICPMD_BDF_LENGTH];
+	int dev_fd;
+	int iommu_group_fd;
+	int vfio_fd;
+	int iommu_groupid;
+	int eventfd;
+	u_int8_t mac_addr[ETH_ALEN];
+	pthread_t err_intr_thread;
+	int promisc;
+	int allmulti;
+	int ig_vlan_strip_en;
+	int link_status;
+	u8 hw_ip_checksum;
+
+	unsigned int flags;
+	unsigned int priv_flags;
+
+	/* work queue */
+	struct vnic_wq wq[ENIC_WQ_MAX];
+	unsigned int wq_count;
+
+	/* receive queue */
+	struct vnic_rq rq[ENIC_RQ_MAX];
+	unsigned int rq_count;
+
+	/* completion queue */
+	struct vnic_cq cq[ENIC_CQ_MAX];
+	unsigned int cq_count;
+
+	/* interrupt resource */
+	struct vnic_intr intr;
+	unsigned int intr_count;
+};
+
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+	return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+	return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+	return 0;
+}
+
+static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
+{
+	return (struct enic *)eth_dev->data->dev_private;
+}
+
+#endif /* _ENIC_H_ */
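
The enic_cq_rq()/enic_cq_wq() helpers above encode the fixed completion-queue
layout this PMD assumes: RQ n completes on CQ n, and WQ n completes on
CQ rq_count + n, which is also why ENIC_CQ_MAX is the sum of the two queue
maxima. A standalone sketch (not part of the patch) of that mapping for a
hypothetical 2 RQ / 2 WQ vNIC:

#include <stdio.h>

int main(void)
{
	unsigned int rq_count = 2, wq_count = 2, i;

	/* RQs take CQ indexes [0, rq_count), as in enic_cq_rq() */
	for (i = 0; i < rq_count; i++)
		printf("RQ %u completes on CQ %u\n", i, i);
	/* WQs take [rq_count, rq_count + wq_count), as in enic_cq_wq() */
	for (i = 0; i < wq_count; i++)
		printf("WQ %u completes on CQ %u\n", i, rq_count + i);
	return 0;
}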
diff --git a/lib/librte_pmd_enic/enic_clsf.c b/lib/librte_pmd_enic/enic_clsf.c
new file mode 100644
index 0000000..3ee87c3
--- /dev/null
+++ b/lib/librte_pmd_enic/enic_clsf.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <libgen.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_byteorder.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+#define SOCKET_0			0
+#define ENICPMD_CLSF_HASH_ENTRIES	ENICPMD_FDIR_MAX
+#define ENICPMD_CLSF_BUCKET_ENTRIES	4
+
+int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)
+{
+	int32_t pos;
+	struct enic_fdir_node *key;
+	/* See if the key is in the table */
+	pos = rte_hash_del_key(enic->fdir.hash, params);
+	switch (pos) {
+	case -EINVAL:
+	case -ENOENT:
+		enic->fdir.stats.f_remove++;
+		return -EINVAL;
+	default:
+		/* The entry is present in the table */
+		key = enic->fdir.nodes[pos];
+
+		/* Delete the filter */
+		vnic_dev_classifier(enic->vdev, CLSF_DEL,
+			&key->fltr_id, NULL);
+		rte_free(key);
+		enic->fdir.nodes[pos] = NULL;
+		enic->fdir.stats.free++;
+		enic->fdir.stats.remove++;
+		break;
+	}
+	return 0;
+}
+
+int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
+	u16 queue, u8 drop)
+{
+	struct enic_fdir_node *key;
+	struct filter fltr = {0};
+	int32_t pos;
+	u8 do_free = 0;
+	u16 old_fltr_id = 0;
+
+	if (!enic->fdir.hash || params->vlan_id || !params->l4type ||
+		(RTE_FDIR_IPTYPE_IPV6 == params->iptype) ||
+		(RTE_FDIR_L4TYPE_SCTP == params->l4type) ||
+		params->flex_bytes || drop) {
+		enic->fdir.stats.f_add++;
+		return -ENOTSUP;
+	}
+
+	/* See if the key is already there in the table */
+	pos = rte_hash_del_key(enic->fdir.hash, params);
+	switch (pos) {
+	case -EINVAL:
+		enic->fdir.stats.f_add++;
+		return -EINVAL;
+	case -ENOENT:
+		/* Add a new classifier entry */
+		if (!enic->fdir.stats.free) {
+			enic->fdir.stats.f_add++;
+			return -ENOSPC;
+		}
+		key = (struct enic_fdir_node *)rte_zmalloc(
+			"enic_fdir_node",
+			sizeof(struct enic_fdir_node), 0);
+		if (!key) {
+			enic->fdir.stats.f_add++;
+			return -ENOMEM;
+		}
+		break;
+	default:
+		/* The entry is already present in the table.
+		 * Check if there is a change in queue
+		 */
+		key = enic->fdir.nodes[pos];
+		enic->fdir.nodes[pos] = NULL;
+		if (unlikely(key->rq_index == queue)) {
+			/* Nothing to be done */
+			pos = rte_hash_add_key(enic->fdir.hash, params);
+			enic->fdir.nodes[pos] = key;
+			enic->fdir.stats.f_add++;
+			dev_warning(enic,
+				"FDIR rule is already present\n");
+			return 0;
+		}
+
+		if (likely(enic->fdir.stats.free)) {
+			/* Add the filter and then delete the old one.
+			 * This is to avoid packets from going into the
+			 * default queue during the window between
+			 * delete and add
+			 */
+			do_free = 1;
+			old_fltr_id = key->fltr_id;
+		} else {
+			/* No free slots in the classifier.
+			 * Delete the filter and add the modified one later
+			 */
+			vnic_dev_classifier(enic->vdev, CLSF_DEL,
+				&key->fltr_id, NULL);
+			enic->fdir.stats.free++;
+		}
+
+		break;
+	}
+
+	key->filter = *params;
+	key->rq_index = queue;
+
+	fltr.type = FILTER_IPV4_5TUPLE;
+	fltr.u.ipv4.src_addr = rte_be_to_cpu_32(params->ip_src.ipv4_addr);
+	fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(params->ip_dst.ipv4_addr);
+	fltr.u.ipv4.src_port = rte_be_to_cpu_16(params->port_src);
+	fltr.u.ipv4.dst_port = rte_be_to_cpu_16(params->port_dst);
+
+	if (RTE_FDIR_L4TYPE_TCP == params->l4type)
+		fltr.u.ipv4.protocol = PROTO_TCP;
+	else
+		fltr.u.ipv4.protocol = PROTO_UDP;
+
+	fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+
+	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
+		key->fltr_id = queue;
+	} else {
+		dev_err(enic, "Add classifier entry failed\n");
+		enic->fdir.stats.f_add++;
+		rte_free(key);
+		return -1;
+	}
+
+	if (do_free)
+		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
+	else {
+		enic->fdir.stats.free--;
+		enic->fdir.stats.add++;
+	}
+
+	pos = rte_hash_add_key(enic->fdir.hash, (void *)key);
+	enic->fdir.nodes[pos] = key;
+	return 0;
+}
+
+void enic_clsf_destroy(struct enic *enic)
+{
+	u32 index;
+	struct enic_fdir_node *key;
+	/* delete classifier entries */
+	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
+		key = enic->fdir.nodes[index];
+		if (key) {
+			vnic_dev_classifier(enic->vdev, CLSF_DEL,
+				&key->fltr_id, NULL);
+			rte_free(key);
+		}
+	}
+
+	if (enic->fdir.hash) {
+		rte_hash_free(enic->fdir.hash);
+		enic->fdir.hash = NULL;
+	}
+}
+
+int enic_clsf_init(struct enic *enic)
+{
+	struct rte_hash_parameters hash_params = {
+		.name = "enicpmd_clsf_hash",
+		.entries = ENICPMD_CLSF_HASH_ENTRIES,
+		.bucket_entries = ENICPMD_CLSF_BUCKET_ENTRIES,
+		.key_len = sizeof(struct rte_fdir_filter),
+		.hash_func = DEFAULT_HASH_FUNC,
+		.hash_func_init_val = 0,
+		.socket_id = SOCKET_0,
+	};
+
+	enic->fdir.hash = rte_hash_create(&hash_params);
+	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
+	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
+	return (NULL == enic->fdir.hash);
+}
+
+
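
One point worth noting in enic_fdir_add_fltr() above: when a classifier slot
is free, it programs the replacement filter before deleting the old one, so a
matching flow never falls through to the default queue in between. A minimal
standalone sketch (not part of the patch) of that ordering, with
hw_add()/hw_del() standing in for the vnic_dev_classifier() CLSF_ADD/CLSF_DEL
calls:

#include <stdio.h>

static int hw_add(int id)  { printf("add filter %d\n", id); return 0; }
static void hw_del(int id) { printf("del filter %d\n", id); }

/* Replace old_id with new_id without a window where neither matches. */
static int replace_filter(int old_id, int new_id, int have_free_slot)
{
	if (have_free_slot) {
		if (hw_add(new_id))	/* program the replacement first... */
			return -1;
		hw_del(old_id);		/* ...then retire the old entry */
	} else {
		hw_del(old_id);		/* no room: accept a brief window */
		if (hw_add(new_id))
			return -1;
	}
	return 0;
}

int main(void) { return replace_filter(1, 2, 1); }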
diff --git a/lib/librte_pmd_enic/enic_compat.h b/lib/librte_pmd_enic/enic_compat.h
new file mode 100644
index 0000000..d962904
--- /dev/null
+++ b/lib/librte_pmd_enic/enic_compat.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#ifndef _ENIC_COMPAT_H_
+#define _ENIC_COMPAT_H_
+
+#include <stdio.h>
+
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+
+#define ENIC_PAGE_ALIGN	4096ULL
+#define ENIC_ALIGN	ENIC_PAGE_ALIGN
+#define NAME_MAX	255
+#define ETH_ALEN	6
+
+#define __iomem
+
+#define rmb()	rte_rmb() /* dpdk rte provided rmb */
+#define wmb()	rte_wmb() /* dpdk rte provided wmb */
+
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) &((t *)0)->m)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warning(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))
+#define udelay usleep
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define kzalloc(size, flags) calloc(1, size)
+#define kfree(x) free(x)
+
+#define dev_err(x, args...) printf("rte_enic_pmd : Error - " args)
+#define dev_info(x, args...) printf("rte_enic_pmd: Info - " args)
+#define dev_warning(x, args...) printf("rte_enic_pmd: Warning - " args)
+#define dev_trace(x, args...) printf("rte_enic_pmd: Trace - " args)
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef	unsigned char	u8;
+typedef	unsigned short	u16;
+typedef	unsigned int	u32;
+typedef	unsigned long long u64;
+typedef	unsigned long long dma_addr_t;
+
+static inline u_int32_t ioread32(volatile void *addr)
+{
+	return *(volatile u_int32_t *)addr;
+}
+
+static inline u16 ioread16(volatile void *addr)
+{
+	return *(volatile u16 *)addr;
+}
+
+static inline u_int8_t ioread8(volatile void *addr)
+{
+	return *(volatile u_int8_t *)addr;
+}
+
+static inline void iowrite32(u_int32_t val, volatile void *addr)
+{
+	*(volatile u_int32_t *)addr = val;
+}
+
+static inline void iowrite16(u16 val, volatile void *addr)
+{
+	*(volatile u16 *)addr = val;
+}
+
+static inline void iowrite8(u_int8_t val, volatile void *addr)
+{
+	*(volatile u_int8_t *)addr = val;
+}
+
+static inline unsigned int readl(volatile void __iomem *addr)
+{
+	return *(volatile unsigned int *)addr;
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+	*(volatile unsigned int *)addr = val;
+}
+
+#define min_t(type, x, y) ({ \
+	type __min1 = (x); \
+	type __min2 = (y); \
+	__min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+	type __max1 = (x); \
+	type __max2 = (y); \
+	__max1 > __max2 ? __max1 : __max2; })
+
+#endif /* _ENIC_COMPAT_H_ */
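
The min_t()/max_t() macros above are the usual GCC statement-expression clamp
idiom; enic_res.c later applies exactly this to the descriptor counts read
from the adapter. A standalone sketch (not part of the patch, and assuming
GCC statement expressions, as the macros themselves do) of that
clamp-then-align step:

#include <stdio.h>

#define min_t(type, x, y) ({ \
	type __min1 = (x); \
	type __min2 = (y); \
	__min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({ \
	type __max1 = (x); \
	type __max2 = (y); \
	__max1 > __max2 ? __max1 : __max2; })

int main(void)
{
	unsigned int requested = 10000;
	/* clamp to [ENIC_MIN_WQ_DESCS, ENIC_MAX_WQ_DESCS] = [64, 4096],
	 * then mask down to a multiple of 32, as enic_get_vnic_config()
	 * does for wq_desc_count/rq_desc_count */
	unsigned int count = min_t(unsigned int, 4096,
			max_t(unsigned int, 64, requested));

	count &= 0xffffffe0;
	printf("%u -> %u descriptors\n", requested, count);
	return 0;
}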
diff --git a/lib/librte_pmd_enic/enic_main.c b/lib/librte_pmd_enic/enic_main.c
new file mode 100644
index 0000000..e1f81bd
--- /dev/null
+++ b/lib/librte_pmd_enic/enic_main.c
@@ -0,0 +1,1328 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <libgen.h>
+#ifdef RTE_EAL_VFIO
+#include <linux/vfio.h>
+#endif
+
+#include <rte_pci.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_ethdev.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+static inline int enic_is_sriov_vf(struct enic *enic)
+{
+	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
+static int is_zero_addr(char *addr)
+{
+	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+
+static int is_mcast_addr(char *addr)
+{
+	return addr[0] & 1;
+}
+
+static int is_eth_addr_valid(char *addr)
+{
+	return !is_mcast_addr(addr) && !is_zero_addr(addr);
+}
+
+static inline struct rte_mbuf *
+enic_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check_raw(m, 0);
+	return m;
+}
+
+static const struct rte_memzone *ring_dma_zone_reserve(
+	struct rte_eth_dev *dev, const char *ring_name,
+	uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+		dev->driver->pci_drv.name, ring_name,
+		dev->data->port_id, queue_id);
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		return mz;
+
+	return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
+		socket_id, RTE_MEMZONE_1GB, ENIC_ALIGN);
+}
+
+void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
+{
+	vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
+}
+
+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
+
+	rte_mempool_put(mbuf->pool, mbuf);
+	buf->os_buf = NULL;
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
+{
+	enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_wq_service(&enic->wq[q_number], cq_desc,
+		completed_index, enic_wq_free_buf,
+		opaque);
+
+	return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+	unsigned int i;
+	u32 error_status;
+
+	for (i = 0; i < enic->wq_count; i++) {
+		error_status = vnic_wq_error_status(&enic->wq[i]);
+		if (error_status)
+			dev_err(enic, "WQ[%d] error_status %d\n", i,
+				error_status);
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		error_status = vnic_rq_error_status(&enic->rq[i]);
+		if (error_status)
+			dev_err(enic, "RQ[%d] error_status %d\n", i,
+				error_status);
+	}
+}
+
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
+{
+	unsigned int cq = enic_cq_wq(enic, wq->index);
+
+	/* Return the work done */
+	return vnic_cq_service(&enic->cq[cq],
+		-1 /*wq_work_to_do*/, enic_wq_service, NULL);
+}
+
+
+int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+	struct rte_mbuf *tx_pkt, unsigned short len,
+	u_int8_t sop, u_int8_t eop,
+	u_int16_t ol_flags, u_int16_t vlan_tag)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+	u_int16_t mss = 0;
+	u_int16_t header_length = 0;
+	u_int8_t cq_entry = eop;
+	u_int8_t vlan_tag_insert = 0;
+	unsigned char *buf = (unsigned char *)(tx_pkt->buf_addr) +
+		RTE_PKTMBUF_HEADROOM;
+#ifdef RTE_EAL_VFIO
+	u_int64_t bus_addr = (unsigned long)buf; /* must have IOMMU */
+#else
+	u_int64_t bus_addr = (dma_addr_t)
+		(tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+#endif
+
+	if (sop) {
+		if (ol_flags & PKT_TX_VLAN_PKT)
+			vlan_tag_insert = 1;
+
+		if (enic->hw_ip_checksum) {
+			if (ol_flags & PKT_TX_IP_CKSUM)
+				mss |= ENIC_CALC_IP_CKSUM;
+
+			if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
+				mss |= ENIC_CALC_TCP_UDP_CKSUM;
+		}
+	}
+
+	wq_enet_desc_enc(desc,
+		bus_addr,
+		len,
+		mss,
+		0 /* header_length */,
+		0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
+		eop,
+		cq_entry,
+		0 /* fcoe_encap */,
+		vlan_tag_insert,
+		vlan_tag,
+		0 /* loopback */);
+
+	vnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,
+		sop, eop,
+		1 /*desc_skip_cnt*/,
+		cq_entry,
+		0 /*compressed send*/,
+		0 /*wrid*/);
+
+	return 0;
+}
+
+void enic_dev_stats_clear(struct enic *enic)
+{
+	if (vnic_dev_stats_clear(enic->vdev))
+		dev_err(enic, "Error in clearing stats\n");
+}
+
+void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+{
+	struct vnic_stats *stats;
+
+	memset(r_stats, 0, sizeof(*r_stats));
+	if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+		dev_err(enic, "Error in getting stats\n");
+		return;
+	}
+
+	r_stats->ipackets = stats->rx.rx_frames_ok;
+	r_stats->opackets = stats->tx.tx_frames_ok;
+
+	r_stats->ibytes = stats->rx.rx_bytes_ok;
+	r_stats->obytes = stats->tx.tx_bytes_ok;
+
+	r_stats->ierrors = stats->rx.rx_errors;
+	r_stats->oerrors = stats->tx.tx_errors;
+
+	r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
+	r_stats->rx_nombuf = stats->rx.rx_no_bufs;
+}
+
+void enic_del_mac_address(struct enic *enic)
+{
+	if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+		dev_err(enic, "del mac addr failed\n");
+}
+
+void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+{
+	int err;
+
+	if (!is_eth_addr_valid(mac_addr)) {
+		dev_err(enic, "invalid mac address\n");
+		return;
+	}
+
+	/* Delete the currently programmed address, not the new one */
+	err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
+	if (err) {
+		dev_err(enic, "del mac addr failed\n");
+		return;
+	}
+
+	ether_addr_copy((struct ether_addr *)mac_addr,
+		(struct ether_addr *)enic->mac_addr);
+
+	err = vnic_dev_add_addr(enic->vdev, mac_addr);
+	if (err) {
+		dev_err(enic, "add mac addr failed\n");
+		return;
+	}
+}
+
+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+
+	if (!buf->os_buf)
+		return;
+
+	rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
+	buf->os_buf = NULL;
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+	unsigned int error_interrupt_enable = 1;
+	unsigned int error_interrupt_offset = 0;
+	int index = 0;
+	unsigned int cq_index = 0;
+
+	for (index = 0; index < enic->rq_count; index++) {
+		vnic_rq_init(&enic->rq[index],
+			enic_cq_rq(enic, index),
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	for (index = 0; index < enic->wq_count; index++) {
+		vnic_wq_init(&enic->wq[index],
+			enic_cq_wq(enic, index),
+			error_interrupt_enable,
+			error_interrupt_offset);
+	}
+
+	vnic_dev_stats_clear(enic->vdev);
+
+	for (index = 0; index < enic->cq_count; index++) {
+		vnic_cq_init(&enic->cq[index],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			0 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			0 /* interrupt offset */,
+			0 /* cq_message_addr */);
+	}
+
+	vnic_intr_init(&enic->intr,
+		enic->config.intr_timer_usec,
+		enic->config.intr_timer_type,
+		/*mask_on_assertion*/1);
+}
+
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	void *buf;
+	dma_addr_t dma_addr;
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+	u_int8_t type = RQ_ENET_TYPE_ONLY_SOP;
+	u_int16_t len = ENIC_MAX_MTU + VLAN_ETH_HLEN;
+	u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
+	struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
+	struct rte_mbuf *hdr_mbuf = NULL;
+
+	if (!mbuf) {
+		dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
+		return -1;
+	}
+
+	if (unlikely(split_hdr_size)) {
+		if (vnic_rq_desc_avail(rq) < 2) {
+			rte_mempool_put(mbuf->pool, mbuf);
+			return -1;
+		}
+		hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
+		if (!hdr_mbuf) {
+			rte_mempool_put(mbuf->pool, mbuf);
+			dev_err(enic,
+				"hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
+			return -1;
+		}
+
+		hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		buf = rte_pktmbuf_mtod(hdr_mbuf, void *);
+
+		hdr_mbuf->nb_segs = 2;
+		hdr_mbuf->port = rq->index;
+		hdr_mbuf->next = mbuf;
+
+#ifdef RTE_EAL_VFIO
+		dma_addr = (dma_addr_t)buf;
+#else
+		dma_addr = (dma_addr_t)
+			(hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
+#endif
+
+		rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
+
+		vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
+			(unsigned int)split_hdr_size, 0 /*wrid*/);
+
+		desc = vnic_rq_next_desc(rq);
+		type = RQ_ENET_TYPE_NOT_SOP;
+	} else {
+		mbuf->nb_segs = 1;
+		mbuf->port = rq->index;
+	}
+
+	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+	buf = rte_pktmbuf_mtod(mbuf, void *);
+	mbuf->next = NULL;
+
+#ifdef RTE_EAL_VFIO
+	dma_addr = (dma_addr_t)buf;
+#else
+	dma_addr = (dma_addr_t)
+		(mbuf->buf_physaddr + mbuf->data_off);
+#endif
+
+	rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
+
+	vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
+		(unsigned int)mbuf->buf_len, 0 /*wrid*/);
+
+	return 0;
+}
+
+static int enic_rq_indicate_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
+	struct rte_mbuf *rx_pkt = NULL;
+	struct rte_mbuf *hdr_rx_pkt = NULL;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+	u32 rss_hash;
+
+	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (packet_error) {
+		dev_err(enic, "packet error\n");
+		return 0;
+	}
+
+	rx_pkt = (struct rte_mbuf *)buf->os_buf;
+	buf->os_buf = NULL;
+
+	if (unlikely(skipped)) {
+		rx_pkt->data_len = 0;
+		return 0;
+	}
+
+	if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
+		/* No header split configured */
+		*rx_pkt_bucket = rx_pkt;
+		rx_pkt->pkt_len = bytes_written;
+
+		if (ipv4) {
+			rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+			if (!csum_not_calc) {
+				if (unlikely(!ipv4_csum_ok))
+					rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+				if ((tcp || udp) && (!tcp_udp_csum_ok))
+					rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+			}
+		} else if (ipv6)
+			rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+	} else {
+		/* Header split */
+		if (sop && !eop) {
+			/* This piece is header */
+			*rx_pkt_bucket = rx_pkt;
+			rx_pkt->pkt_len = bytes_written;
+		} else {
+			if (sop && eop) {
+				/* The packet is smaller than split_hdr_size */
+				*rx_pkt_bucket = rx_pkt;
+				rx_pkt->pkt_len = bytes_written;
+				if (ipv4) {
+					rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+					if (!csum_not_calc) {
+						if (unlikely(!ipv4_csum_ok))
+							rx_pkt->ol_flags |=
+								PKT_RX_IP_CKSUM_BAD;
+
+						if ((tcp || udp) &&
+							(!tcp_udp_csum_ok))
+							rx_pkt->ol_flags |=
+								PKT_RX_L4_CKSUM_BAD;
+					}
+				} else if (ipv6)
+					rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+			} else {
+				/* Payload */
+				hdr_rx_pkt = *rx_pkt_bucket;
+				hdr_rx_pkt->pkt_len += bytes_written;
+				if (ipv4) {
+					hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+					if (!csum_not_calc) {
+						if (unlikely(!ipv4_csum_ok))
+							hdr_rx_pkt->ol_flags |=
+								PKT_RX_IP_CKSUM_BAD;
+
+						if ((tcp || udp) &&
+							(!tcp_udp_csum_ok))
+							hdr_rx_pkt->ol_flags |=
+								PKT_RX_L4_CKSUM_BAD;
+					}
+				} else if (ipv6)
+					hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+
+			}
+		}
+	}
+
+	rx_pkt->data_len = bytes_written;
+
+	if (rss_hash) {
+		rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
+		rx_pkt->hash.rss = rss_hash;
+	}
+
+	if (vlan_tci) {
+		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
+		rx_pkt->vlan_tci = vlan_tci;
+	}
+
+	return eop;
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	return vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_indicate_buf, opaque);
+
+}
+
+int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
+	unsigned int budget, unsigned int *work_done)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	unsigned int cq = enic_cq_rq(enic, rq->index);
+	int err = 0;
+
+	*work_done = vnic_cq_service(&enic->cq[cq],
+		budget, enic_rq_service, (void *)rx_pkts);
+
+	if (*work_done) {
+		vnic_rq_fill(rq, enic_rq_alloc_buf);
+
+		/* Need at least one buffer on ring to get going */
+		if (vnic_rq_desc_used(rq) == 0) {
+			dev_err(enic, "Unable to alloc receive buffers\n");
+			err = -1;
+		}
+	}
+	return err;
+}
+
+#ifdef RTE_EAL_VFIO
+void *enic_buf_map(void *priv, void *addr, size_t size)
+{
+	struct enic *enic = (struct enic *)priv;
+	struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };
+
+	size_t r_size;
+
+	dma_map.vaddr = (__u64)addr;
+	dma_map.vaddr &= ~(ENIC_PAGE_ALIGN - 1);
+	r_size = (size_t)PAGE_ROUND_UP(
+		size + ((__u64)addr & (ENIC_PAGE_ALIGN - 1)));
+	dma_map.size = r_size;
+	dma_map.iova = dma_map.vaddr;
+	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+
+	ioctl(enic->vfio_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+	/* Ignore the return value of this ioctl as this fails
+	 * only because of duplicate entries.
+	 */
+
+	return (void *)addr;
+}
+#endif
+
+void *enic_alloc_consistent(void *priv, size_t size,
+	dma_addr_t *dma_handle, u8 *name)
+{
+	struct enic *enic = (struct enic *)priv;
+	void *vaddr;
+	const struct rte_memzone *rz;
+	*dma_handle = 0;
+
+	rz = rte_memzone_reserve_aligned(name, size, 0, 0, ENIC_ALIGN);
+	if (!rz) {
+		pr_err("%s : Failed to allocate memory requested for %s",
+			__func__, name);
+		return NULL;
+	}
+
+#ifdef RTE_EAL_VFIO
+	vaddr = enic_buf_map(enic, rz->addr, rz->len);
+	if (!vaddr) {
+		pr_err("%s : Failed to map allocated memory requested for %s",
+			__func__, name);
+		return NULL;
+	}
+
+	*dma_handle = (dma_addr_t)vaddr;
+#else
+	vaddr = rz->addr;
+	*dma_handle = (dma_addr_t)rz->phys_addr;
+#endif
+	return vaddr;
+}
+
+void enic_free_consistent(struct rte_pci_device *hwdev, size_t size,
+	void *vaddr, dma_addr_t dma_handle)
+{
+	/* Nothing to be done */
+}
+
+void enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
+	void *arg)
+{
+	struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+
+	dev_err(enic, "Err intr.\n");
+	vnic_intr_return_all_credits(&enic->intr);
+
+	enic_log_q_error(enic);
+}
+
+int enic_enable(struct enic *enic)
+{
+	int index;
+	void *res;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *rmz;
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+#ifdef RTE_EAL_VFIO
+	for (index = 0; index < enic->rq_count; index++) {
+		snprintf(mz_name, sizeof(mz_name), "MP_%s",
+			enic->rq[index].mp->name);
+		rmz = rte_memzone_lookup(mz_name);
+		enic_buf_map((void *)enic, rmz->addr, (size_t)rmz->len);
+	}
+#endif
+
+	if (enic_clsf_init(enic))
+		dev_warning(enic, "Init of hash table for clsf failed."\
+			"Flow director feature will not work\n");
+
+	/* Fill RQ bufs */
+	for (index = 0; index < enic->rq_count; index++) {
+		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
+
+		/* Need at least one buffer on ring to get going
+		*/
+		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
+			dev_err(enic, "Unable to alloc receive buffers\n");
+			return -1;
+		}
+	}
+
+	for (index = 0; index < enic->wq_count; index++)
+		vnic_wq_enable(&enic->wq[index]);
+	for (index = 0; index < enic->rq_count; index++)
+		vnic_rq_enable(&enic->rq[index]);
+
+	vnic_dev_enable_wait(enic->vdev);
+
+#ifndef RTE_EAL_VFIO
+	/* Register and enable error interrupt */
+	rte_intr_callback_register(&(enic->pdev->intr_handle),
+		enic_intr_handler, (void *)enic->rte_dev);
+
+	rte_intr_enable(&(enic->pdev->intr_handle));
+#endif
+	vnic_intr_unmask(&enic->intr);
+
+	return 0;
+}
+
+int enic_alloc_intr_resources(struct enic *enic)
+{
+	int err;
+
+	dev_info(enic, "vNIC resources used:  "\
+		"wq %d rq %d cq %d intr %d\n",
+		enic->wq_count, enic->rq_count,
+		enic->cq_count, enic->intr_count);
+
+	err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
+	if (err)
+		enic_free_vnic_resources(enic);
+
+	return err;
+}
+
+void enic_free_rq(void *rxq)
+{
+	struct vnic_rq *rq = (struct vnic_rq *)rxq;
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+
+	vnic_rq_free(rq);
+	vnic_cq_free(&enic->cq[rq->index]);
+}
+
+void enic_start_wq(struct enic *enic, uint16_t queue_idx)
+{
+	vnic_wq_enable(&enic->wq[queue_idx]);
+}
+
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
+{
+	return vnic_wq_disable(&enic->wq[queue_idx]);
+}
+
+void enic_start_rq(struct enic *enic, uint16_t queue_idx)
+{
+	vnic_rq_enable(&enic->rq[queue_idx]);
+}
+
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
+{
+	return vnic_rq_disable(&enic->rq[queue_idx]);
+}
+
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+	unsigned int socket_id, struct rte_mempool *mp,
+	uint16_t nb_desc)
+{
+	int err;
+	struct vnic_rq *rq = &enic->rq[queue_idx];
+
+	rq->socket_id = socket_id;
+	rq->mp = mp;
+
+	if (nb_desc) {
+		if (nb_desc > enic->config.rq_desc_count) {
+			dev_warning(enic,
+				"RQ %d - number of rx desc in cmd line (%d)"\
+				"is greater than that in the UCSM/CIMC adapter"\
+				"policy.  Applying the value in the adapter "\
+				"policy (%d).\n",
+				queue_idx, nb_desc, enic->config.rq_desc_count);
+		} else if (nb_desc != enic->config.rq_desc_count) {
+			enic->config.rq_desc_count = nb_desc;
+			dev_info(enic,
+				"RX Queues - effective number of descs:%d\n",
+				nb_desc);
+		}
+	}
+
+	/* Allocate queue resources */
+	err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
+		enic->config.rq_desc_count,
+		sizeof(struct rq_enet_desc));
+	if (err) {
+		dev_err(enic, "error in allocation of rq\n");
+		return err;
+	}
+
+	err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+		socket_id, enic->config.rq_desc_count,
+		sizeof(struct cq_enet_rq_desc));
+	if (err) {
+		vnic_rq_free(rq);
+		dev_err(enic, "error in allocation of cq for rq\n");
+	}
+
+	return err;
+}
+
+void enic_free_wq(void *txq)
+{
+	struct vnic_wq *wq = (struct vnic_wq *)txq;
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	vnic_wq_free(wq);
+	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
+}
+
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+	unsigned int socket_id, uint16_t nb_desc)
+{
+	int err;
+	struct vnic_wq *wq = &enic->wq[queue_idx];
+	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+
+	wq->socket_id = socket_id;
+	if (nb_desc) {
+		if (nb_desc > enic->config.wq_desc_count) {
+			dev_warning(enic,
+				"WQ %d - number of tx desc in cmd line (%d)"\
+				"is greater than that in the UCSM/CIMC adapter"\
+				"policy.  Applying the value in the adapter "\
+				"policy (%d)\n",
+				queue_idx, nb_desc, enic->config.wq_desc_count);
+		} else if (nb_desc != enic->config.wq_desc_count) {
+			enic->config.wq_desc_count = nb_desc;
+			dev_info(enic,
+				"TX Queues - effective number of descs:%d\n",
+				nb_desc);
+		}
+	}
+
+	/* Allocate queue resources */
+	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
+		enic->config.wq_desc_count,
+		sizeof(struct wq_enet_desc));
+	if (err) {
+		dev_err(enic, "error in allocation of wq\n");
+		return err;
+	}
+
+	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
+		socket_id, enic->config.wq_desc_count,
+		sizeof(struct cq_enet_wq_desc));
+	if (err) {
+		vnic_wq_free(wq);
+		dev_err(enic, "error in allocation of cq for wq\n");
+	}
+
+	return err;
+}
+
+int enic_disable(struct enic *enic)
+{
+	unsigned int i;
+	int err;
+
+	vnic_intr_mask(&enic->intr);
+	(void)vnic_intr_masked(&enic->intr); /* flush write */
+
+	vnic_dev_disable(enic->vdev);
+
+	enic_clsf_destroy(enic);
+
+	if (!enic_is_sriov_vf(enic))
+		vnic_dev_del_addr(enic->vdev, enic->mac_addr);
+
+	for (i = 0; i < enic->wq_count; i++) {
+		err = vnic_wq_disable(&enic->wq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
+	}
+
+	vnic_dev_set_reset_flag(enic->vdev, 1);
+	vnic_dev_notify_unset(enic->vdev);
+
+	for (i = 0; i < enic->wq_count; i++)
+		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+	for (i = 0; i < enic->rq_count; i++)
+		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	for (i = 0; i < enic->cq_count; i++)
+		vnic_cq_clean(&enic->cq[i]);
+	vnic_intr_clean(&enic->intr);
+
+	return 0;
+}
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+	int (*start)(struct vnic_dev *, int),
+	int (*finished)(struct vnic_dev *, int *),
+	int arg)
+{
+	int done;
+	int err;
+	int i;
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max */
+	for (i = 0; i < 2000; i++) {
+		err = finished(vdev, &done);
+		if (err)
+			return err;
+		if (done)
+			return 0;
+		usleep(1000);
+	}
+	return -ETIMEDOUT;
+}
+
+static int enic_dev_open(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_open,
+		vnic_dev_open_done, 0);
+	if (err)
+		dev_err(enic_get_dev(enic),
+			"vNIC device open failed, err %d\n", err);
+
+	return err;
+}
+
+static int enic_set_rsskey(struct enic *enic)
+{
+	dma_addr_t rss_key_buf_pa;
+	union vnic_rss_key *rss_key_buf_va = NULL;
+	union vnic_rss_key rss_key = {
+		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
+		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
+		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
+		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
+	};
+	int err;
+	char name[NAME_MAX];
+
+	snprintf(name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
+		&rss_key_buf_pa, name);
+	if (!rss_key_buf_va)
+		return -ENOMEM;
+
+	rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+
+	err = enic_set_rss_key(enic,
+		rss_key_buf_pa,
+		sizeof(union vnic_rss_key));
+
+	enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+		rss_key_buf_va, rss_key_buf_pa);
+
+	return err;
+}
+
+static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+{
+	dma_addr_t rss_cpu_buf_pa;
+	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
+	unsigned int i;
+	int err;
+	char name[NAME_MAX];
+
+	snprintf(name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
+		&rss_cpu_buf_pa, name);
+	if (!rss_cpu_buf_va)
+		return -ENOMEM;
+
+	for (i = 0; i < (1 << rss_hash_bits); i++)
+		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+
+	err = enic_set_rss_cpu(enic,
+		rss_cpu_buf_pa,
+		sizeof(union vnic_rss_cpu));
+
+	enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+		rss_cpu_buf_va, rss_cpu_buf_pa);
+
+	return err;
+}
+
+static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
+	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+{
+	const u8 tso_ipid_split_en = 0;
+	int err;
+
+	/* Enable VLAN tag stripping */
+
+	err = enic_set_nic_cfg(enic,
+		rss_default_cpu, rss_hash_type,
+		rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en,
+		enic->ig_vlan_strip_en);
+
+	return err;
+}
+
+int enic_set_rss_nic_cfg(struct enic *enic)
+{
+	const u8 rss_default_cpu = 0;
+	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
+		NIC_CFG_RSS_HASH_TYPE_IPV6 |
+		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+	const u8 rss_hash_bits = 7;
+	const u8 rss_base_cpu = 0;
+	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
+
+	if (rss_enable) {
+		if (!enic_set_rsskey(enic)) {
+			if (enic_set_rsscpu(enic, rss_hash_bits)) {
+				rss_enable = 0;
+				dev_warning(enic, "RSS disabled, "\
+					"Failed to set RSS cpu indirection table.");
+			}
+		} else {
+			rss_enable = 0;
+			dev_warning(enic,
+				"RSS disabled, Failed to set RSS key.\n");
+		}
+	}
+
+	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
+		rss_hash_bits, rss_base_cpu, rss_enable);
+}
+
+int enic_setup_finish(struct enic *enic)
+{
+	int ret;
+
+	ret = enic_set_rss_nic_cfg(enic);
+	if (ret) {
+		dev_err(enic, "Failed to config nic, aborting.\n");
+		return -1;
+	}
+
+	vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+
+	/* Default conf */
+	vnic_dev_packet_filter(enic->vdev,
+		1 /* directed  */,
+		1 /* multicast */,
+		1 /* broadcast */,
+		0 /* promisc   */,
+		1 /* allmulti  */);
+
+	enic->promisc = 0;
+	enic->allmulti = 1;
+
+	return 0;
+}
+
+#ifdef RTE_EAL_VFIO
+static void enic_eventfd_init(struct enic *enic)
+{
+	enic->eventfd = enic->pdev->intr_handle.fd;
+}
+
+void *enic_err_intr_handler(void *arg)
+{
+	struct enic *enic = (struct enic *)arg;
+	unsigned int intr = enic_msix_err_intr(enic);
+	ssize_t size;
+	uint64_t data;
+
+	while (1) {
+		size = read(enic->eventfd, &data, sizeof(data));
+		dev_err(enic, "Err intr.\n");
+		vnic_intr_return_all_credits(&enic->intr);
+
+		enic_log_q_error(enic);
+	}
+
+	return NULL;
+}
+#endif
+
+void enic_add_packet_filter(struct enic *enic)
+{
+	/* Args -> directed, multicast, broadcast, promisc, allmulti */
+	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+		enic->promisc, enic->allmulti);
+}
+
+int enic_get_link_status(struct enic *enic)
+{
+	return vnic_dev_link_status(enic->vdev);
+}
+
+
+#ifdef RTE_EAL_VFIO
+static int enic_create_err_intr_thread(struct enic *enic)
+{
+	pthread_attr_t intr_attr;
+
+	/* create threads for error interrupt handling */
+	pthread_attr_init(&intr_attr);
+	pthread_attr_setstacksize(&intr_attr, 0x100000);
+
+	/* ERR */
+	if (pthread_create(&enic->err_intr_thread, &intr_attr,
+		enic_err_intr_handler, (void *)enic)) {
+		dev_err(enic, "Failed to create err interrupt handler threads\n");
+		return -1;
+	}
+
+	pthread_attr_destroy(&intr_attr);
+
+	return 0;
+}
+
+
+static int enic_set_intr_mode(struct enic *enic)
+{
+	struct vfio_irq_set *irq_set;
+	int *fds;
+	int size;
+	int ret = -1;
+	int index;
+
+	if (enic->intr_count < 1) {
+		dev_err(enic, "Unsupported resource conf.\n");
+		return -1;
+	}
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+
+	enic->intr_count = 1;
+
+	enic_eventfd_init(enic);
+	size = sizeof(*irq_set) + (sizeof(int));
+
+	irq_set = rte_zmalloc("enic_vfio_irq", size, 0);
+	irq_set->argsz = size;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	irq_set->count = 1; /* For error interrupt only */
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+		VFIO_IRQ_SET_ACTION_TRIGGER;
+	fds = (int *)&irq_set->data;
+
+	fds[0] = enic->eventfd;
+
+	ret = ioctl(enic->pdev->intr_handle.vfio_dev_fd,
+		VFIO_DEVICE_SET_IRQS, irq_set);
+	rte_free(irq_set);
+	if (ret) {
+		dev_err(enic, "Failed to set eventfds for interrupts\n");
+		return -1;
+	}
+
+	enic_create_err_intr_thread(enic);
+	return 0;
+}
+
+static void enic_clear_intr_mode(struct enic *enic)
+{
+	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+}
+#endif
+
+static void enic_dev_deinit(struct enic *enic)
+{
+	unsigned int i;
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+	if (eth_dev->data->mac_addrs)
+		rte_free(eth_dev->data->mac_addrs);
+
+#ifdef RTE_EAL_VFIO
+	enic_clear_intr_mode(enic);
+#endif
+}
+
+
+int enic_set_vnic_res(struct enic *enic)
+{
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+	if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
+		(enic->wq_count < eth_dev->data->nb_tx_queues)) {
+		dev_err(enic, "Not enough resources configured, aborting\n");
+		return -1;
+	}
+
+	enic->rq_count = eth_dev->data->nb_rx_queues;
+	enic->wq_count = eth_dev->data->nb_tx_queues;
+	if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
+		dev_err(enic, "Not enough resources configured, aborting\n");
+		return -1;
+	}
+
+	enic->cq_count = enic->rq_count + enic->wq_count;
+	return 0;
+}
+
+static int enic_dev_init(struct enic *enic)
+{
+	unsigned int i;
+	int err;
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+	vnic_dev_intr_coal_timer_info_default(enic->vdev);
+
+	/* Get vNIC configuration
+	*/
+	err = enic_get_vnic_config(enic);
+	if (err) {
+		dev_err(enic, "Get vNIC configuration failed, aborting\n");
+		return err;
+	}
+
+	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+	if (!eth_dev->data->mac_addrs) {
+		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
+		return -1;
+	}
+	ether_addr_copy((struct ether_addr *) enic->mac_addr,
+		&eth_dev->data->mac_addrs[0]);
+
+
+	/* Get available resource counts
+	*/
+	enic_get_res_counts(enic);
+
+#ifdef RTE_EAL_VFIO
+	/* Set interrupt mode based on resource counts and system
+	 * capabilities
+	 */
+	err = enic_set_intr_mode(enic);
+	if (err) {
+		rte_free(eth_dev->data->mac_addrs);
+		enic_clear_intr_mode(enic);
+		dev_err(enic, "Failed to set intr mode based on resource "\
+			"counts and system capabilities, aborting\n");
+		return err;
+	}
+#endif
+
+	vnic_dev_set_reset_flag(enic->vdev, 0);
+
+	return 0;
+
+}
+
+int enic_probe(struct enic *enic)
+{
+	const char *bdf = enic->bdf_name;
+	struct rte_pci_device *pdev = enic->pdev;
+	struct rte_eth_dev *eth_dev = enic->rte_dev;
+	unsigned int i;
+	int err = -1;
+
+	dev_info(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
+
+	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
+	enic->bar0.len = pdev->mem_resource[0].len;
+#ifdef RTE_EAL_VFIO
+	enic->vfio_fd = pci_vfio_container_fd();
+#endif
+
+	/* Register vNIC device */
+	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
+	if (!enic->vdev) {
+		dev_err(enic, "vNIC registration failed, aborting\n");
+		goto err_out;
+	}
+
+	vnic_register_cbacks(enic->vdev,
+#ifdef RTE_EAL_VFIO
+		enic_buf_map,
+#endif
+		enic_alloc_consistent,
+		enic_free_consistent);
+
+	/* Issue device open to get device in known state */
+	err = enic_dev_open(enic);
+	if (err) {
+		dev_err(enic, "vNIC dev open failed, aborting\n");
+		goto err_out_unregister;
+	}
+
+	/* Set ingress vlan rewrite mode before vnic initialization */
+	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+	if (err) {
+		dev_err(enic,
+			"Failed to set ingress vlan rewrite mode, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	/* Issue device init to initialize the vnic-to-switch link.
+	 * We'll start with carrier off and wait for link UP
+	 * notification later to turn on carrier.  We don't need
+	 * to wait here for the vnic-to-switch link initialization
+	 * to complete; link UP notification is the indication that
+	 * the process is complete.
+	 */
+
+	err = vnic_dev_init(enic->vdev, 0);
+	if (err) {
+		dev_err(enic, "vNIC dev init failed, aborting\n");
+		goto err_out_dev_close;
+	}
+
+	err = enic_dev_init(enic);
+	if (err) {
+		dev_err(enic, "Device initialization failed, aborting\n");
+		goto err_out_dev_close;
+	}
+
+	return 0;
+
+err_out_dev_close:
+	vnic_dev_close(enic->vdev);
+err_out_unregister:
+	vnic_dev_unregister(enic->vdev);
+err_out:
+	return err;
+}
+
+void enic_remove(struct enic *enic)
+{
+	enic_dev_deinit(enic);
+	vnic_dev_close(enic->vdev);
+	vnic_dev_unregister(enic->vdev);
+}
+
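
enic_set_rsscpu() above fills the RSS indirection table round-robin: hash
bucket i steers to RQ i % rq_count, with four one-byte entries packed per
32-bit word of union vnic_rss_cpu (hence the cpu[i/4].b[i%4] indexing). A
standalone sketch (not part of the patch) of that fill for 2^7 = 128 buckets
across a hypothetical 4 RQs:

#include <stdio.h>

struct rss_cpu_word { unsigned char b[4]; };	/* 4 entries per word */

int main(void)
{
	unsigned int rss_hash_bits = 7, rq_count = 4, i;
	struct rss_cpu_word cpu[(1 << 7) / 4];

	/* same indexing as enic_set_rsscpu() */
	for (i = 0; i < (1u << rss_hash_bits); i++)
		cpu[i / 4].b[i % 4] = i % rq_count;

	printf("hash bucket 5 -> RQ %u\n", cpu[5 / 4].b[5 % 4]);
	return 0;
}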
diff --git a/lib/librte_pmd_enic/enic_res.c b/lib/librte_pmd_enic/enic_res.c
new file mode 100644
index 0000000..b38201d
--- /dev/null
+++ b/lib/librte_pmd_enic/enic_res.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: enic_res.c 171146 2014-05-02 07:08:20Z ssujith $"
+
+#include "enic_compat.h"
+#include "rte_ethdev.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+	struct vnic_enet_config *c = &enic->config;
+	int err;
+
+	err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
+	if (err) {
+		dev_err(enic_get_dev(enic),
+			"Error getting MAC addr, %d\n", err);
+		return err;
+	}
+
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(enic->vdev, \
+			offsetof(struct vnic_enet_config, m), \
+			sizeof(c->m), &c->m); \
+		if (err) { \
+			dev_err(enic_get_dev(enic), \
+				"Error getting %s, %d\n", #m, err); \
+			return err; \
+		} \
+	} while (0)
+
+	GET_CONFIG(flags);
+	GET_CONFIG(wq_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(mtu);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(intr_mode);
+	GET_CONFIG(intr_timer_usec);
+	GET_CONFIG(loop_tag);
+	GET_CONFIG(num_arfs);
+
+	c->wq_desc_count =
+		min_t(u32, ENIC_MAX_WQ_DESCS,
+		max_t(u32, ENIC_MIN_WQ_DESCS,
+		c->wq_desc_count));
+	c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+	c->rq_desc_count =
+		min_t(u32, ENIC_MAX_RQ_DESCS,
+		max_t(u32, ENIC_MIN_RQ_DESCS,
+		c->rq_desc_count));
+	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+	if (c->mtu == 0)
+		c->mtu = 1500;
+	c->mtu = min_t(u16, ENIC_MAX_MTU,
+		max_t(u16, ENIC_MIN_MTU,
+		c->mtu));
+
+	c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+		vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+	dev_info(enic_get_dev(enic),
+		"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+		"wq/rq %d/%d mtu %d\n",
+		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+		c->wq_desc_count, c->rq_desc_count, c->mtu);
+	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+		"rss %s intr mode %s type %s timer %d usec "
+		"loopback tag 0x%04x\n",
+		ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, RSS) ? "yes" : "no",
+		c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+		c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+		c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+		"unknown",
+		c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+		c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+		"unknown",
"idle" : + "unknown", + c->intr_timer_usec, + c->loop_tag); + + return 0; +} + +int enic_add_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); + + return err; +} + +int enic_del_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); + + return err; +} + +int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + u64 a0, a1; + u32 nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + + return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); +} + +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) +{ + u64 a0 = (u64)key_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait); +} + +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len) +{ + u64 a0 = (u64)cpu_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait); +} + +void enic_free_vnic_resources(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_free(&enic->wq[i]); + for (i = 0; i < enic->rq_count; i++) + vnic_rq_free(&enic->rq[i]); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_free(&enic->cq[i]); + vnic_intr_free(&enic->intr); +} + +void enic_get_res_counts(struct enic *enic) +{ + enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->intr_count = vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + dev_info(enic_get_dev(enic), + "vNIC resources avail: wq %d rq %d cq %d intr %d\n", + enic->wq_count, enic->rq_count, + enic->cq_count, enic->intr_count); +} + + diff --git a/lib/librte_pmd_enic/enic_res.h b/lib/librte_pmd_enic/enic_res.h new file mode 100644 index 0000000..76e0a28 --- /dev/null +++ b/lib/librte_pmd_enic/enic_res.h @@ -0,0 +1,168 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
diff --git a/lib/librte_pmd_enic/enic_res.h b/lib/librte_pmd_enic/enic_res.h
new file mode 100644
index 0000000..76e0a28
--- /dev/null
+++ b/lib/librte_pmd_enic/enic_res.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: enic_res.h 173137 2014-05-16 03:27:22Z sanpilla $"
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS		64
+#define ENIC_MAX_WQ_DESCS		4096
+#define ENIC_MIN_RQ_DESCS		64
+#define ENIC_MAX_RQ_DESCS		4096
+
+#define ENIC_MIN_MTU			68
+#define ENIC_MAX_MTU			9000
+
+#define ENIC_MULTICAST_PERFECT_FILTERS	32
+#define ENIC_UNICAST_PERFECT_FILTERS	32
+
+#define ENIC_NON_TSO_MAX_DESC		16
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss_or_csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag,
+	int offload_mode, int cq_entry, int sop, int eop, int loopback)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+	u8 desc_skip_cnt = 1;
+	u8 compressed_send = 0;
+	u64 wrid = 0;
+
+	wq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		(u16)len,
+		(u16)mss_or_csum_offset,
+		(u16)hdr_len, (u8)offload_mode,
+		(u8)eop, (u8)cq_entry,
+		0, /* fcoe_encap */
+		(u8)vlan_tag_insert,
+		(u16)vlan_tag,
+		(u8)loopback);
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+		(u8)cq_entry, compressed_send, wrid);
+}
+
+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	int eop, int loopback)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, 0, 0, 0,
+		eop, 0 /* !SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
+	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop, int loopback)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		0, 0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop, int loopback)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
+		0, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM,
+		eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int csum_offset, unsigned int hdr_len,
+	int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
+		eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr, unsigned int len,
+	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
+	unsigned int vlan_tag, int eop, int loopback)
+{
+	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+		mss, hdr_len, vlan_tag_insert, vlan_tag,
+		WQ_ENET_OFFLOAD_MODE_TSO,
+		eop, 1 /* SOP */, eop, loopback);
+}
+static inline void enic_queue_rq_desc(struct vnic_rq *rq,
+	void *os_buf, unsigned int os_buf_index,
+	dma_addr_t dma_addr, unsigned int len)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+	u64 wrid = 0;
+	u8 type = os_buf_index ?
+		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
+
+	rq_enet_desc_enc(desc,
+		(u64)dma_addr | VNIC_PADDR_TARGET,
+		type, (u16)len);
+
+	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
+}
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+	u8 ig_vlan_strip_en);
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
-- 
1.9.1
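
P.S. One detail worth calling out in the enic_queue_wq_desc_*() helpers in
enic_res.h above: they multiplex the descriptor's 16-bit mss_or_csum_offset
field by offload mode — checksum flags for the CSUM mode, an L4 checksum byte
offset for CSUM_L4, and the TSO segment size for TSO. A standalone sketch
(not part of the patch) of that encoding:

#include <stdio.h>

enum { MODE_CSUM, MODE_CSUM_L4, MODE_TSO };

static unsigned short mss_or_csum(int mode, int ip_csum, int tcpudp_csum,
	unsigned short csum_offset, unsigned short mss)
{
	switch (mode) {
	case MODE_CSUM:
		/* flag bits, as in enic_queue_wq_desc_csum() */
		return (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0);
	case MODE_CSUM_L4:
		return csum_offset;	/* L4 checksum byte offset */
	default:
		return mss;		/* TSO segment size */
	}
}

int main(void)
{
	printf("csum-both -> %u\n", mss_or_csum(MODE_CSUM, 1, 1, 0, 0));
	return 0;
}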