Support Rx queue interrupt.

Signed-off-by: Jiawen Wu <jiawe...@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |  1 +
 doc/guides/nics/txgbe.rst          |  1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 43 ++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini 
b/doc/guides/nics/features/txgbe.ini
index c8cd58ce2..b2f5f832c 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Rx interrupt         = Y
 Queue start/stop     = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index 101765a6c..1bf4b6b6f 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -17,6 +17,7 @@ Features
 - TSO offload
 - Jumbo frames
 - Link state information
+- Interrupt mode for RX
 - Scattered and gather for TX and RX
 - LRO
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 3ef99f31e..9151542ef 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1558,6 +1558,47 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, 
uint8_t on)
        return 0;
 }
 
+static int
+txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       uint32_t mask;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       if (queue_id < 32) {
+               mask = rd32(hw, TXGBE_IMS(0));
+               mask |= (1u << queue_id); /* set, not clear, to enable */
+               wr32(hw, TXGBE_IMS(0), mask);
+       } else if (queue_id < 64) {
+               mask = rd32(hw, TXGBE_IMS(1));
+               mask |= (1u << (queue_id - 32));
+               wr32(hw, TXGBE_IMS(1), mask);
+       }
+       rte_intr_enable(intr_handle);
+
+       return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       if (queue_id < 32) {
+               mask = rd32(hw, TXGBE_IMS(0));
+               mask &= ~(1u << queue_id); /* 1u: "1 << 31" is signed UB */
+               wr32(hw, TXGBE_IMS(0), mask);
+       } else if (queue_id < 64) {
+               mask = rd32(hw, TXGBE_IMS(1));
+               mask &= ~(1u << (queue_id - 32));
+               wr32(hw, TXGBE_IMS(1), mask);
+       }
+
+       return 0;
+}
+
 /**
  * set the IVAR registers, mapping interrupt causes to vectors
  * @param hw
@@ -1691,6 +1732,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .tx_queue_start             = txgbe_dev_tx_queue_start,
        .tx_queue_stop              = txgbe_dev_tx_queue_stop,
        .rx_queue_setup             = txgbe_dev_rx_queue_setup,
+       .rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
        .rx_queue_release           = txgbe_dev_rx_queue_release,
        .tx_queue_setup             = txgbe_dev_tx_queue_setup,
        .tx_queue_release           = txgbe_dev_tx_queue_release,
-- 
2.18.4



Reply via email to