This patch fixes the problem of reusing the index of the outermost loop inside nested loops. This bug leads to failure when starting a multi-queue virtio device: rx queues (except for the first one) cannot be started, and in particular their vq_ring cannot be initialized, so that when the rx function is invoked on these queues, a segmentation fault happens.
Fixes: a900472aedef ("virtio: split virtio Rx/Tx queue") Signed-off-by: Jianfeng Tan <jianfeng.tan at intel.com> --- drivers/net/virtio/virtio_rxtx.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index 2e7205b..b96d0cb 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -331,7 +331,7 @@ virtio_dev_rxtx_start(struct rte_eth_dev *dev) * - Allocate blank mbufs for the each rx descriptor * */ - int i; + int i, j; PMD_INIT_FUNC_TRACE(); @@ -352,15 +352,18 @@ virtio_dev_rxtx_start(struct rte_eth_dev *dev) error = ENOSPC; #ifdef RTE_MACHINE_CPUFLAG_SSSE3 - if (use_simple_rxtx) - for (i = 0; i < vq->vq_nentries; i++) { - vq->vq_ring.avail->ring[i] = i; - vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; + if (use_simple_rxtx) { + uint16_t k; + + for (k = 0; k < vq->vq_nentries; k++) { + vq->vq_ring.avail->ring[k] = k; + vq->vq_ring.desc[k].flags = VRING_DESC_F_WRITE; } + } #endif memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf)); - for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++) - vq->sw_ring[vq->vq_nentries + i] = &rxvq->fake_mbuf; + for (j = 0; j < RTE_PMD_VIRTIO_RX_MAX_BURST; j++) + vq->sw_ring[vq->vq_nentries + j] = &rxvq->fake_mbuf; while (!virtqueue_full(vq)) { m = rte_mbuf_raw_alloc(rxvq->mpool); @@ -399,20 +402,21 @@ virtio_dev_rxtx_start(struct rte_eth_dev *dev) #ifdef RTE_MACHINE_CPUFLAG_SSSE3 if (use_simple_rxtx) { int mid_idx = vq->vq_nentries >> 1; - for (i = 0; i < mid_idx; i++) { - vq->vq_ring.avail->ring[i] = i + mid_idx; - vq->vq_ring.desc[i + mid_idx].next = i; - vq->vq_ring.desc[i + mid_idx].addr = + + for (j = 0; j < mid_idx; j++) { + vq->vq_ring.avail->ring[j] = j + mid_idx; + vq->vq_ring.desc[j + mid_idx].next = j; + vq->vq_ring.desc[j + mid_idx].addr = txvq->virtio_net_hdr_mem + offsetof(struct virtio_tx_region, tx_hdr); - vq->vq_ring.desc[i + mid_idx].len = + vq->vq_ring.desc[j + 
mid_idx].len = vq->hw->vtnet_hdr_size; - vq->vq_ring.desc[i + mid_idx].flags = + vq->vq_ring.desc[j + mid_idx].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[i].flags = 0; + vq->vq_ring.desc[j].flags = 0; } - for (i = mid_idx; i < vq->vq_nentries; i++) - vq->vq_ring.avail->ring[i] = i; + for (j = mid_idx; j < vq->vq_nentries; j++) + vq->vq_ring.avail->ring[j] = j; } #endif VIRTQUEUE_DUMP(vq); -- 2.1.4