[PATCH] net/virtio-user: reset used index counter in dev reset
When the virtio device is reinitialized during ethdev reconfiguration, all the virtio rings are recreated and repopulated on the device. Accordingly, reset the used index counter value back to zero.

Signed-off-by: Kommula Shiva Shankar
---
 drivers/net/virtio/virtio_user_ethdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index ae6593ba0b..d60c7e188c 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -204,6 +204,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
 	vring->device = (void *)(uintptr_t)used_addr;
 	dev->packed_queues[queue_idx].avail_wrap_counter = true;
 	dev->packed_queues[queue_idx].used_wrap_counter = true;
+	dev->packed_queues[queue_idx].used_idx = 0;
 
 	for (i = 0; i < vring->num; i++)
 		vring->desc[i].flags = 0;
-- 
2.43.0
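Review note: the invariant being restored is that driver-side packed-queue
state matches a freshly created ring; per the virtio spec, packed-ring wrap
counters start at 1 and ring indices at 0. A standalone sketch of that state
(struct and helper names are illustrative; the real fields live in
dev->packed_queues[]):

#include <stdbool.h>
#include <stdint.h>

struct packed_queue_state {
	bool avail_wrap_counter;
	bool used_wrap_counter;
	uint16_t used_idx;
};

/* Reset to initial packed-ring state after the rings are recreated. */
static void
packed_queue_state_reset(struct packed_queue_state *q)
{
	q->avail_wrap_counter = true;
	q->used_wrap_counter = true;
	q->used_idx = 0; /* the one-line fix above adds exactly this */
}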
[PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapter_op calls
From: Pavan Nikhilesh

Migrate all invocations of the rte_event_dma_adapter_op API to rte_dma_op.

Signed-off-by: Pavan Nikhilesh
Change-Id: I56b6e61af72d119287b0d2ba6a9bbacc3ae808d6
---
 app/test-eventdev/test_perf_common.c |  6 +--
 app/test-eventdev/test_perf_common.h |  4 +-
 app/test/test_event_dma_adapter.c    |  6 +--
 drivers/dma/cnxk/cnxk_dmadev.c       |  2 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c    | 12 +++---
 lib/eventdev/rte_event_dma_adapter.c | 18 -
 lib/eventdev/rte_event_dma_adapter.h | 57
 7 files changed, 24 insertions(+), 81 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 627f07caa1..4e0109db52 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -562,11 +562,11 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
 static inline void
 dma_adapter_enq_op_fwd(struct prod_data *p)
 {
-	struct rte_event_dma_adapter_op *ops[BURST_SIZE] = {NULL};
+	struct rte_dma_op *ops[BURST_SIZE] = {NULL};
 	struct test_perf *t = p->t;
 	const uint32_t nb_flows = t->nb_flows;
 	const uint64_t nb_pkts = t->nb_pkts;
-	struct rte_event_dma_adapter_op op;
+	struct rte_dma_op op;
 	struct rte_event evts[BURST_SIZE];
 	const uint8_t dev_id = p->dev_id;
 	struct evt_options *opt = t->opt;
@@ -2114,7 +2114,7 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
 		t->pool = rte_mempool_create(test->name, /* mempool name */
 					     opt->pool_sz, /* number of elements */
-					     sizeof(struct rte_event_dma_adapter_op) +
+					     sizeof(struct rte_dma_op) +
 					     (sizeof(struct rte_dma_sge) * 2),
 					     cache_sz, /* cache size */
 					     0, NULL, NULL, NULL, /* obj constructor */
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d7333ad390..63078b0ee2 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -139,7 +139,7 @@ perf_mark_fwd_latency(enum evt_prod_type prod_type, struct rte_event *const ev)
 		}
 		pe->timestamp = rte_get_timer_cycles();
 	} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
-		struct rte_event_dma_adapter_op *op = ev->event_ptr;
+		struct rte_dma_op *op = ev->event_ptr;
 
 		op->user_meta = rte_get_timer_cycles();
 	} else {
@@ -297,7 +297,7 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, enum evt_prod_ty
 		tstamp = pe->timestamp;
 		rte_crypto_op_free(op);
 	} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
-		struct rte_event_dma_adapter_op *op = ev->event_ptr;
+		struct rte_dma_op *op = ev->event_ptr;
 
 		to_free_in_bulk = op;
 		tstamp = op->user_meta;
diff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c
index 9988d4fc7b..7f72a4e81d 100644
--- a/app/test/test_event_dma_adapter.c
+++ b/app/test/test_event_dma_adapter.c
@@ -234,7 +234,7 @@ test_op_forward_mode(void)
 {
 	struct rte_mbuf *src_mbuf[TEST_MAX_OP];
 	struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
-	struct rte_event_dma_adapter_op *op;
+	struct rte_dma_op *op;
 	struct rte_event ev[TEST_MAX_OP];
 	int ret, i;
@@ -266,7 +266,7 @@ test_op_forward_mode(void)
 		op->vchan = TEST_DMA_VCHAN_ID;
 		op->event_meta = dma_response_info.event;
 
-		/* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
+		/* Fill in event info and update event_ptr with rte_dma_op */
 		memset(&ev[i], 0, sizeof(struct rte_event));
 		ev[i].event = 0;
 		ev[i].op = RTE_EVENT_OP_NEW;
@@ -396,7 +396,7 @@ configure_dmadev(void)
 					       rte_socket_id());
 	RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");
 
-	elt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);
+	elt_size = sizeof(struct rte_dma_op) + (sizeof(struct rte_dma_sge) * 2);
 	params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
 					     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 	RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index e7be3767b2..60b3d28d65 100644
--- a/drivers/dma/cnxk/c
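Review note: a minimal sketch of how a producer fills the renamed op,
mirroring the test code above. Assumptions: rte_mempool_get() is the
allocation path (standard DPDK), the two rte_dma_sge elements sized into the
op mempool trail the op structure, and the helper name is illustrative.

#include <rte_dmadev.h>
#include <rte_mempool.h>

/* Allocate one rte_dma_op (the pool is sized for two trailing sges)
 * and fill the fields the adapter test uses. */
static struct rte_dma_op *
make_dma_op(struct rte_mempool *op_mp, uint16_t vchan, uint64_t resp_event)
{
	struct rte_dma_op *op;

	if (rte_mempool_get(op_mp, (void **)&op) < 0)
		return NULL;

	op->op_mp = op_mp;           /* consumer frees the op back here */
	op->vchan = vchan;           /* target vchan, as in the test */
	op->event_meta = resp_event; /* completion event metadata */
	/* src/dst sge setup would follow (assumed trailing array). */
	return op;
}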
[PATCH RFC 3/4] doc: update prog guide to use rte_dma_op
From: Pavan Nikhilesh

Update the documentation to replace all instances of rte_event_dma_adapter_op with rte_dma_op.

Signed-off-by: Pavan Nikhilesh
Change-Id: I0fe65d18f4601709826c11c6738cacec8991515d
---
 doc/guides/prog_guide/eventdev/event_dma_adapter.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/guides/prog_guide/eventdev/event_dma_adapter.rst b/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
index e040d89e8b..e8437a3297 100644
--- a/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
+++ b/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
@@ -144,7 +144,7 @@ on which it enqueues events towards the DMA adapter using ``rte_event_enqueue_bu
         uint32_t cap;
         int ret;
 
-        /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
+        /* Fill in event info and update event_ptr with rte_dma_op */
         memset(&ev, 0, sizeof(ev));
         .
         .
@@ -244,11 +244,11 @@ Set event response information
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In the ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` / ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` mode,
-the application specifies the dmadev ID and vchan ID in ``struct rte_event_dma_adapter_op``
+the application specifies the dmadev ID and vchan ID in ``struct rte_dma_op``
 and the event information (response information)
 needed to enqueue an event after the DMA operation has completed.
 The response information is specified in ``struct rte_event``
-and appended to the ``struct rte_event_dma_adapter_op``.
+and appended to the ``struct rte_dma_op``.
 
 Start the adapter instance
-- 
2.43.0
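Review note: concretely, the "appended" response information is the event
metadata carried on the op. A small sketch under the RFC's field names
(queue id and scheduling type values are illustrative):

#include <string.h>
#include <rte_dmadev.h>
#include <rte_eventdev.h>

/* Build the response event the adapter enqueues on DMA completion
 * and attach it to the op. */
static void
set_dma_response_info(struct rte_dma_op *op, uint8_t ev_qid)
{
	struct rte_event resp;

	memset(&resp, 0, sizeof(resp));
	resp.queue_id = ev_qid;
	resp.sched_type = RTE_SCHED_TYPE_ATOMIC;
	resp.event_type = RTE_EVENT_TYPE_DMADEV;

	op->event_meta = resp.event; /* appended response information */
}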
[PATCH RFC 1/4] dmadev: add enqueue dequeue operations
From: Pavan Nikhilesh

Add enqueue/dequeue operations that use struct rte_dma_op to communicate with the DMA device.

These operations need to be enabled at DMA device configuration time by setting the flag rte_dma_conf::enable_enq_deq if the device supports the RTE_DMA_CAPA_OPS_ENQ_DEQ capability.

The enqueue/dequeue operations are not compatible with the rte_dma_copy, rte_dma_copy_sg, rte_dma_fill, rte_dma_submit, rte_dma_completed and rte_dma_completed_status range of APIs.

Signed-off-by: Pavan Nikhilesh
Change-Id: I6587b19608264a3511ea4dd3cf7b865cc5cac441
---
 lib/dmadev/rte_dmadev.c              |  18
 lib/dmadev/rte_dmadev.h              | 145 +++
 lib/dmadev/rte_dmadev_core.h         |  10 ++
 lib/dmadev/rte_dmadev_trace_fp.h     |  20
 lib/dmadev/rte_dmadev_trace_points.c |   6 ++
 5 files changed, 199 insertions(+)

diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 8bb7824aa1..4c108ef26e 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -921,6 +921,22 @@ dummy_burst_capacity(__rte_unused const void *dev_private,
 	return 0;
 }
 
+static uint16_t
+dummy_enqueue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+	RTE_DMA_LOG(ERR, "Enqueue not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_dequeue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+	RTE_DMA_LOG(ERR, "Dequeue not configured or not supported.");
+	return 0;
+}
+
 static void
 dma_fp_object_dummy(struct rte_dma_fp_object *obj)
 {
@@ -932,6 +948,8 @@ dma_fp_object_dummy(struct rte_dma_fp_object *obj)
 	obj->completed         = dummy_completed;
 	obj->completed_status  = dummy_completed_status;
 	obj->burst_capacity    = dummy_burst_capacity;
+	obj->enqueue           = dummy_enqueue;
+	obj->dequeue           = dummy_dequeue;
 }
 
 static int
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 2f9304a9db..e11bff64d8 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -265,6 +265,11 @@ int16_t rte_dma_next_dev(int16_t start_dev_id);
  * known from 'nb_priorities' field in struct rte_dma_info.
  */
 #define RTE_DMA_CAPA_PRI_POLICY_SP	RTE_BIT64(8)
+/** Support enqueue and dequeue operations.
+ *
+ * @see struct rte_dma_op
+ */
+#define RTE_DMA_CAPA_OPS_ENQ_DEQ	RTE_BIT64(9)
 /** Support copy operation.
  * This capability start with index of 32, so that it could leave gap between
@@ -351,6 +356,15 @@ struct rte_dma_conf {
 	 * Lowest value indicates higher priority and vice-versa.
 	 */
 	uint16_t priority;
+	/** Indicates whether to use enqueue/dequeue operations using rte_dma_op.
+	 * false - default mode, true - enqueue/dequeue mode.
+	 * This value can be set to true only when the ENQ_DEQ_OPS capability is
+	 * supported. When enabled, only calls to `rte_dma_enqueue_ops` and
+	 * `rte_dma_dequeue_ops` are valid.
+	 *
+	 * @see RTE_DMA_CAPA_OPS_ENQ_DEQ
+	 */
+	bool enable_enq_deq;
 };
 
 /**
@@ -791,6 +805,63 @@ struct rte_dma_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+/**
+ * A structure used to hold an event-based DMA operation entry. All the
+ * information required for a DMA transfer shall be populated in a
+ * "struct rte_dma_op" instance.
+ */
+struct rte_dma_op {
+	uint64_t flags;
+	/**< Flags related to the operation.
+	 * @see RTE_DMA_OP_FLAG_*
+	 */
+	struct rte_mempool *op_mp;
+	/**< Mempool from which op is allocated. */
+	enum rte_dma_status_code status;
+	/**< Status code for this operation. */
+	uint32_t rsvd;
+	/**< Reserved for future use. */
+	uint64_t impl_opaque[2];
+	/**< Implementation-specific opaque data.
+	 * A DMA device implementation uses this field to hold
+	 * implementation-specific values to share between dequeue and enqueue
+	 * operations.
+	 * The application should not modify this field.
+	 */
+	uint64_t user_meta;
+	/**< Memory to store user specific metadata.
+	 * The DMA device implementation should not modify this area.
+	 */
+	uint64_t event_meta;
+	/**< Event metadata of DMA completion event.
+	 * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is not
+	 * supported in OP_NEW mode.
+	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
+	 * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
+	 *
+	 * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is not
+	 * supported in OP_FWD mode.
+	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+	 * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNA
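Review note: a minimal opt-in sketch for the new mode. It assumes the
rte_dma_enqueue_ops()/rte_dma_dequeue_ops() wrappers named in the
rte_dma_conf comment take (dev_id, vchan, ops, nb_ops), matching the fp_obj
handler signatures; vchan setup and rte_dma_start() are elided.

#include <rte_dmadev.h>

#define BURST 32

static int
run_enq_deq(int16_t dev_id, uint16_t vchan, struct rte_dma_op *ops[BURST])
{
	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };
	uint16_t enq, deq;

	if (rte_dma_info_get(dev_id, &info) != 0 ||
	    !(info.dev_capa & RTE_DMA_CAPA_OPS_ENQ_DEQ))
		return -1;

	/* Opt in: copy/fill/submit/completed style calls become invalid. */
	conf.enable_enq_deq = true;
	if (rte_dma_configure(dev_id, &conf) != 0)
		return -1;

	enq = rte_dma_enqueue_ops(dev_id, vchan, ops, BURST);
	deq = rte_dma_dequeue_ops(dev_id, vchan, ops, enq);
	/* In practice, dequeue is polled until all enqueued ops complete. */
	return (int)(enq - deq);
}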
[PATCH RFC 4/4] dma/cnxk: implement enqueue dequeue ops
From: Pavan Nikhilesh

Implement DMA enqueue/dequeue operations when the application enables them via configuration.

Signed-off-by: Pavan Nikhilesh
Change-Id: I57883ce5d358bf23a9d940ed513d0dc762227dcc
---
 drivers/dma/cnxk/cnxk_dmadev.c    |  25 +-
 drivers/dma/cnxk/cnxk_dmadev.h    |   7 ++
 drivers/dma/cnxk/cnxk_dmadev_fp.c | 140 ++
 3 files changed, 171 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 60b3d28d65..18a4914013 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -19,7 +19,7 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
 			     RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
 			     RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG |
-			     RTE_DMA_CAPA_M2D_AUTO_FREE;
+			     RTE_DMA_CAPA_M2D_AUTO_FREE | RTE_DMA_CAPA_OPS_ENQ_DEQ;
 	if (roc_feature_dpi_has_priority()) {
 		dev_info->dev_capa |= RTE_DMA_CAPA_PRI_POLICY_SP;
 		dev_info->nb_priorities = CN10K_DPI_MAX_PRI;
@@ -114,6 +114,21 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
 	if (roc_feature_dpi_has_priority())
 		dpivf->rdpi.priority = conf->priority;
 
+	if (conf->enable_enq_deq) {
+		dev->fp_obj->copy = NULL;
+		dev->fp_obj->fill = NULL;
+		dev->fp_obj->submit = NULL;
+		dev->fp_obj->copy_sg = NULL;
+		dev->fp_obj->completed = NULL;
+		dev->fp_obj->completed_status = NULL;
+
+		dev->fp_obj->enqueue = cnxk_dma_ops_enqueue;
+		dev->fp_obj->dequeue = cnxk_dma_ops_dequeue;
+
+		if (roc_model_is_cn10k())
+			dev->fp_obj->enqueue = cn10k_dma_ops_enqueue;
+	}
+
 	return 0;
 }
 
@@ -270,6 +285,14 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		return -ENOMEM;
 	}
 
+	size = (max_desc * sizeof(struct rte_dma_op *));
+	dpi_conf->c_desc.ops = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+	if (dpi_conf->c_desc.ops == NULL) {
+		plt_err("Failed to allocate for ops array");
+		rte_free(dpi_conf->c_desc.compl_ptr);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < max_desc; i++)
 		dpi_conf->c_desc.compl_ptr[i * CNXK_DPI_COMPL_OFFSET] = CNXK_DPI_REQ_CDATA;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 39fd6afbe9..2615cb5b73 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -93,6 +93,7 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t head;
 	uint16_t tail;
 	uint8_t *compl_ptr;
+	struct rte_dma_op **ops;
 };
 
 struct cnxk_dpi_conf {
@@ -132,5 +133,11 @@ int cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iov
 int cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src,
			 const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
			 uint64_t flags);
+uint16_t cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+			      uint16_t nb_ops);
+uint16_t cn10k_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+			       uint16_t nb_ops);
+uint16_t cnxk_dma_ops_dequeue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+			      uint16_t nb_ops);
 
 #endif
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 340c7601d7..ca9ae7cd3f 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -675,3 +675,143 @@ cnxk_dma_adapter_dequeue(uintptr_t get_work1)
 
 	return (uintptr_t)op;
 }
+
+uint16_t
+cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+	struct cnxk_dpi_vf_s *dpivf = dev_private;
+	struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+	const struct rte_dma_sge *fptr, *lptr;
+	uint16_t src, dst, nwords = 0;
+	struct rte_dma_op *op;
+	uint16_t space, i;
+	uint8_t *comp_ptr;
+	uint64_t hdr[4];
+	int rc;
+
+	space = (dpi_conf->c_desc.max_cnt + 1) -
+		((dpi_conf->c_desc.tail - dpi_conf->c_desc.head) & dpi_conf->c_desc.max_cnt);
+	space = RTE_MIN(space, nb_ops);
+
+	for (i = 0; i < space; i++) {
+		op = ops[i];
+		comp_ptr =
+			&dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail * CNXK_DPI_COMPL_OFFSET];
+		dpi_conf->c_desc.ops[dpi_conf->c_desc.tail] = op;
+		CNXK_DP
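Review note: the free-slot computation above is standard power-of-two ring
arithmetic (this assumes c_desc.max_cnt is ring size minus one with the size
a power of two, consistent with the head/tail masking in the driver). A
standalone worked example with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t max_cnt = 15;  /* 16-entry ring, mask = size - 1 */
	uint16_t head = 14, tail = 3; /* indices wrap modulo 16 */

	/* occupancy = (tail - head) & mask; free = size - occupancy */
	uint16_t used = (tail - head) & max_cnt;  /* (3 - 14) & 15 = 5 */
	uint16_t space = (max_cnt + 1) - used;    /* 16 - 5 = 11 free slots */

	printf("used=%u space=%u\n", used, space);
	return 0;
}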
[PATCH v2] net/virtio: add virtio hash report feature
This patch adds the virtio hash report feature, which is supported in packed queue mode with the scalar datapath.

Signed-off-by: Kommula Shiva Shankar
---
 drivers/net/virtio/virtio.h                   |  2 ++
 drivers/net/virtio/virtio_ethdev.c            | 20 -
 drivers/net/virtio/virtio_ethdev.h            |  1 +
 drivers/net/virtio/virtio_rxtx.c              | 30 +++
 .../net/virtio/virtio_user/virtio_user_dev.c  |  1 +
 drivers/net/virtio/virtqueue.h                | 21 +
 6 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
index ef5827c5f5..c2a0fd477c 100644
--- a/drivers/net/virtio/virtio.h
+++ b/drivers/net/virtio/virtio.h
@@ -30,6 +30,7 @@
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the network */
 #define VIRTIO_NET_F_MQ             22 /* Device supports Receive Flow Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR  23 /* Set MAC address */
+#define VIRTIO_NET_F_HASH_REPORT    57 /* Supports hash report */
 #define VIRTIO_NET_F_RSS            60 /* RSS supported */
 
 /*
@@ -187,6 +188,7 @@ struct virtio_hw {
 	uint8_t started;
 	uint8_t weak_barriers;
 	uint8_t vlan_strip;
+	uint8_t has_hash_report;
 	bool rx_ol_scatter;
 	uint8_t has_tx_offload;
 	uint8_t has_rx_offload;
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 70d4839def..caacbce57a 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1796,7 +1796,9 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 
 	/* Setting up rx_header size for the device */
-	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
+	if (virtio_with_feature(hw, VIRTIO_NET_F_HASH_REPORT))
+		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_hash_report);
+	else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
 	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
 	    virtio_with_packed_queue(hw))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -2181,6 +2183,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
 
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		req_features |=
+			(1ULL << VIRTIO_NET_F_HASH_REPORT);
+
 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
@@ -2233,6 +2239,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 		hw->vlan_strip = 1;
 
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		hw->has_hash_report = 1;
+
 	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 
 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
@@ -2285,6 +2294,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 					"disabled packed ring vectorized rx for TCP_LRO enabled");
 				hw->use_vec_rx = 0;
 			}
+
+			if (rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
+				PMD_DRV_LOG(INFO,
+					"disabled packed ring vectorized rx for RSS_HASH enabled");
+				hw->use_vec_rx = 0;
+			}
 		}
 	} else {
 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
@@ -2669,6 +2684,9 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->flow_type_rss_offloads = 0;
 	}
 
+	if (host_features & (1ULL << VIRTIO_NET_F_HASH_REPORT))
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
 		/*
 		 * According to 2.7 Packed Virtqueues,
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 7be1c9acd0..7aa771fd2b 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -46,6 +46,7 @@
 	 1u << VIRTIO_NET_F_CSUM |	\
 	 1u << VIRTIO_NET_F_HOST_TSO4 |	\
 	 1u << VIRTIO_NET_F_HOST_TSO6 |	\
+	 1ULL << VIRTIO_NET_F_HASH_REPORT |	\
 	 1ULL << VIRTIO_NET_F_RSS)
 
 extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
diff --git a/drivers/net/virtio/virt
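Review note: from the application side, the feature surfaces as the standard
RSS_HASH Rx offload. A minimal usage sketch against the stable ethdev/mbuf
API (port and queue counts are illustrative):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Request the hash-report-backed RSS_HASH offload at configure time. */
static int
enable_rx_hash(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Offered only when the device can negotiate VIRTIO_NET_F_HASH_REPORT. */
	if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		return -ENOTSUP;

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

/* On Rx, the reported hash lands in mbuf->hash.rss. */
static inline uint32_t
rx_hash_of(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) ? m->hash.rss : 0;
}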
[PATCH] doc: update virtio prog guide with new feature list
This patch adds the newly introduced virtio hash report feature to the prog guide.

Signed-off-by: Kommula Shiva Shankar
---
 doc/guides/nics/virtio.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index c22ce56a02..0615abfaaf 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -56,6 +56,7 @@ In this release, the virtio PMD provides the basic functionality of packet recep
     command line. Note that, mac/vlan filter is best effort: unwanted packets could still arrive.
 
 *   "RTE_PKTMBUF_HEADROOM" should be defined
+    no less than "sizeof(struct virtio_net_hdr_hash_report)", which is 20 bytes, when using hash report or
     no less than "sizeof(struct virtio_net_hdr_mrg_rxbuf)", which is 12 bytes, when mergeable or
     "VIRTIO_F_VERSION_1" is set.
     no less than "sizeof(struct virtio_net_hdr)", which is 10 bytes, when using non-mergeable.
@@ -68,6 +69,8 @@ In this release, the virtio PMD provides the basic functionality of packet recep
 
 *   Virtio supports software vlan stripping and inserting.
 
+*   Virtio supports the hash report feature in packed queue mode.
+
 *   Virtio supports using port IO to get PCI resource when UIO module is not available.
 
 *   Virtio supports RSS Rx mode with 40B configurable hash key length, 128
-- 
2.43.0
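Review note: applications can encode the guide's headroom requirement as a
compile-time check. A minimal sketch; the 20-byte figure is the documented
sizeof(struct virtio_net_hdr_hash_report) rather than a reference to the
driver-private header:

#include <assert.h>
#include <rte_config.h>

/* Per the guide: headroom must cover the 20-byte hash report header
 * when VIRTIO_NET_F_HASH_REPORT is negotiated. */
static_assert(RTE_PKTMBUF_HEADROOM >= 20,
	      "mbuf headroom too small for virtio_net_hdr_hash_report");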