Signed-off-by: Danny Zhou <danny.zhou at intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 203 ++++++++++++++++++++++++++++++++++++
 1 file changed, 203 insertions(+)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 3fc3738..1d694c5 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -173,6 +173,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -217,6 +218,11 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);

+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void ixgbe_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
+static void ixgbe_configure_msix(struct ixgbe_hw *hw);
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                uint16_t queue_idx, uint16_t tx_rate);
 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
@@ -332,6 +338,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+       .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -1481,6 +1489,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);

+       /* configure MSI-X so the core can sleep until an Rx interrupt arrives */
+       ixgbe_configure_msix(hw);
+
        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);

@@ -1550,6 +1561,10 @@ skip_link_setup:
        if (dev->data->dev_conf.intr_conf.lsc != 0)
                ixgbe_dev_lsc_interrupt_setup(dev);

+       /* check if rxq interrupt is enabled */
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
+               ixgbe_dev_rxq_interrupt_setup(dev);
+
        /* resume enabled intr since hw reset */
        ixgbe_enable_intr(dev);

@@ -2212,6 +2227,28 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
        return 0;
 }

+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* unmask all Rx/Tx queue interrupt causes in the software shadow;
+        * ixgbe_enable_intr() later writes this mask to EIMS */
+       intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+       return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3502,6 +3539,172 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }

+/* Enable the Rx interrupt of a single queue (eth_dev_ops.rx_queue_intr_enable).
+ * Queues 0-15 are controlled through the shadow EIMS mask; queues 16-63 map
+ * onto the extended EIMS_EX(0)/EIMS_EX(1) registers on 82599/X540.
+ * Returns 0 (queue_id >= 64 is silently ignored).
+ */
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       u32 mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (queue_id < 16) {
+               /* update the shadow mask with interrupts quiesced,
+                * then re-arm EIMS from it */
+               ixgbe_disable_intr(hw);
+               intr->mask |= (1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               /* set (not "&=") this queue's bit: &= would clear the
+                * enable bit of every other queue in the register */
+               mask |= (1 << queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask |= (1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+
+       return 0;
+}
+
+/* Disable the Rx interrupt of a single queue (eth_dev_ops.rx_queue_intr_disable).
+ * Mirror of ixgbe_dev_rx_queue_intr_enable(): clears the queue's bit in the
+ * shadow mask (queues 0-15) or in EIMS_EX(0)/EIMS_EX(1) (queues 16-63).
+ * Returns 0 (queue_id >= 64 is silently ignored).
+ */
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       u32 mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (queue_id < 16) {
+               /* update the shadow mask with interrupts quiesced,
+                * then re-arm EIMS from it */
+               ixgbe_disable_intr(hw);
+               intr->mask &= ~(1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               mask &= ~(1 << queue_id); /* clear only this queue's bit */
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask &= ~(1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @hw: pointer to ixgbe_hw struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ * Register layout follows the Linux ixgbe driver: 82598 has one flat IVAR
+ * table (4 one-byte entries per 32-bit register); 82599/X540 interleave
+ * Rx/Tx entries per queue pair and keep "other causes" in IVAR_MISC.
+ */
+static void
+ixgbe_set_ivar(struct ixgbe_hw *hw, s8 direction,
+                          u8 queue, u8 msix_vector)
+{
+       u32 ivar, index;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL; /* mark entry valid */
+               if (direction == -1)
+                       direction = 0; /* 82598 folds "other" into the Rx half */
+               index = (((direction * 64) + queue) >> 2) & 0x1F;
+               ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+               ivar &= ~(0xFF << (8 * (queue & 0x3))); /* clear old vector byte */
+               ivar |= (msix_vector << (8 * (queue & 0x3)));
+               IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (direction == -1) {
+                       /* other causes */
+                       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+                       index = ((queue & 1) * 8); /* byte 0 or byte 1 of IVAR_MISC */
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       ivar &= ~(0xFF << index);
+                       ivar |= (msix_vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+                       break;
+               } else {
+                       /* tx or rx causes: two queues per IVAR register,
+                        * Rx entry at byte 0/2, Tx entry at byte 1/3 */
+                       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+                       index = ((16 * (queue & 1)) + (8 * direction));
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+                       ivar &= ~(0xFF << index);
+                       ivar |= (msix_vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
+                       break;
+               }
+       default:
+               break;
+       }
+}
+
+/**
+ * ixgbe_configure_msix - Configure MSI-X hardware
+ * @hw: pointer to ixgbe_hw struct
+ *
+ * Sets up the hardware to properly generate MSI-X interrupts: GPIE mode
+ * bits, EIAM auto-mask, per-queue IVAR routing and EITR moderation, and
+ * the EIAC auto-clear mask.
+ */
+static void
+ixgbe_configure_msix(struct ixgbe_hw *hw)
+{
+       int queue_id;
+       u32 mask;
+       u32 gpie;
+
+       /* set GPIE for MSI-X mode; read-modify-write so bits programmed
+        * elsewhere are preserved (the read was previously discarded) */
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+               IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+       /*
+        * use EIAM to auto-mask when MSI-X interrupt is asserted
+        * this saves a register write for every interrupt
+        */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       default:
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+               break;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /*
+        * Populate the IVAR table and set the ITR value for every queue:
+        * queue N is routed to MSI-X vector N.  The EITR write must be
+        * per-queue, inside the loop — previously it ran once after the
+        * loop with queue_id == VFIO_MAX_QUEUE_ID, an out-of-range index.
+        */
+       for (queue_id = 0; queue_id < VFIO_MAX_QUEUE_ID; queue_id++) {
+               ixgbe_set_ivar(hw, 0, queue_id, queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id), 1950);
+       }
+
+       /* route the "other causes" interrupt to its dedicated vector */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ixgbe_set_ivar(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+                              VFIO_MAX_QUEUE_ID);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_set_ivar(hw, -1, 1, 32);
+               break;
+       default:
+               break;
+       }
+
+       /* set up to autoclear timer, and the vectors */
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       mask &= ~(IXGBE_EIMS_OTHER |
+                 IXGBE_EIMS_MAILBOX |
+                 IXGBE_EIMS_LSC);
+
+       IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
        uint16_t queue_idx, uint16_t tx_rate)
 {
-- 
1.8.1.4

Reply via email to