In the IOV scenario, multiple RX queues can be assigned to
one VF. If queue drop is not enabled and one queue runs out
of RX descriptors, that queue can block the other queues of
the same pool. Enable per-queue drop for every queue that
belongs to a VF so a stalled queue only drops its own packets.

Fixes: 00e30184daa0 ("ixgbe: add PF support")
CC: [email protected]
Suggested-by: Liang-min Wang <[email protected]>
Signed-off-by: Wenzhuo Lu <[email protected]>
---
drivers/net/ixgbe/ixgbe_pf.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
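
Note (below the cut, not part of the commit): a minimal sketch of how the
per-queue drop programming in the hunk could be factored into a helper, to
make the QDE register mechanics easier to follow. The helper name
ixgbe_vf_enable_queue_drop() is hypothetical; it assumes the headers already
included by ixgbe_pf.c, and it only uses the same definitions the hunk uses
(IXGBE_QDE, IXGBE_QDE_ENABLE, IXGBE_QDE_WRITE, IXGBE_QDE_IDX_SHIFT,
IXGBE_WRITE_REG, IXGBE_WRITE_FLUSH, RTE_ETH_DEV_SRIOV).

static void
ixgbe_vf_enable_queue_drop(struct rte_eth_dev *dev, struct ixgbe_hw *hw,
			   uint16_t vf)
{
	uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	uint32_t reg;
	int i;

	/* Each VF owns nb_q_per_pool consecutive queues; set the drop
	 * enable bit for every one of them so a queue with no free
	 * descriptors drops its own packets instead of blocking the
	 * other queues of the pool.
	 */
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		/* QDE takes the queue index in the IDX field together
		 * with the enable and write-strobe bits in one access.
		 */
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}
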
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 4715045..d88832e 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -397,15 +397,27 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	uint32_t reg_offset, vf_shift;
 	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
 	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+	uint8_t nb_q_per_pool;
+	int i;
 
 	vf_shift = vf & VFRE_MASK;
 	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
 
-	/* enable transmit and receive for vf */
+	/* enable transmit for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
 	reg |= (reg | (1 << vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
+	/* enable all queue drop for IOV */
+	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+		IXGBE_WRITE_FLUSH(hw);
+		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+		reg |= i << IXGBE_QDE_IDX_SHIFT;
+		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+	}
+
+	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 	reg |= (reg | (1 << vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
--
1.9.3