This patch enables rx queue interrupts for ixgbevf with the following changes:
1) Configure ixgbevf rx queue interrupts
2) Initialize ixgbevf devices appropriately in l3fwd-power
3) Fix VFIO interrupt vector settings
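
For context, the application-side flow this enables looks roughly like the
sketch below: poll the rx queue while traffic flows and, once it goes idle,
re-arm the queue interrupt and block in rte_eal_wait_rx_intr() until the
NIC signals new packets. process_burst() and the rx_queue_intr_enable/
_disable wrappers are illustrative placeholders for the new dev ops, not
APIs added by this patch:

    /* sketch only; error handling omitted */
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    for (;;) {
            uint16_t nb_rx = rte_eth_rx_burst(portid, queueid, pkts,
                            MAX_PKT_BURST);
            if (nb_rx > 0) {
                    process_burst(pkts, nb_rx); /* stay in polling mode */
                    continue;
            }
            /* queue idle: unmask the rx interrupt (VTEIMS on the VF) and
             * sleep on its eventfd instead of spinning */
            rx_queue_intr_enable(portid, queueid);
            rte_eal_wait_rx_intr(portid, queueid);
            rx_queue_intr_disable(portid, queueid);
    }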

Signed-off-by: Yong Liu <yong.liu@intel.com>
Signed-off-by: Danny Zhou <danny.zhou@intel.com>
---
 examples/l3fwd-power/main.c                        |  46 +++++--
 lib/librte_eal/linuxapp/eal/eal_interrupts.c       |  83 ++++++-----
 .../linuxapp/eal/include/exec-env/rte_interrupts.h |   1 +
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c                | 153 +++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h                |   6 +
 5 files changed, 233 insertions(+), 56 deletions(-)

diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 71f1d90..3262db2 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -237,7 +237,6 @@ static struct rte_eth_conf port_conf = {
                .mq_mode = ETH_MQ_TX_NONE,
        },
        .intr_conf = {
-               .lsc = 1,
                .rxq = 1, /**< rxq interrupt feature enabled */
        },
 };
@@ -413,15 +412,19 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
        /**
         * check whether we need to scale down the frequency a step if it slept a lot.
         */
-       if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD)
-               rte_power_freq_down(lcore_id);
+       if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
+               if (rte_power_freq_down)
+                       rte_power_freq_down(lcore_id);
+       }
        else if ( (unsigned)(stats[lcore_id].nb_rx_processed /
-               stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST)
+               stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
                /**
                 * scale down a step if average packet per iteration less
                 * than expectation.
                 */
-               rte_power_freq_down(lcore_id);
+               if (rte_power_freq_down)
+                       rte_power_freq_down(lcore_id);
+       }

        /**
         * initialize another timer according to current frequency to ensure
@@ -946,10 +949,14 @@ start_rx:
                                                rx_queue->freq_up_hint;
                        }

-                       if (lcore_scaleup_hint == FREQ_HIGHEST)
-                               rte_power_freq_max(lcore_id);
-                       else if (lcore_scaleup_hint == FREQ_HIGHER)
-                               rte_power_freq_up(lcore_id);
+                       if (lcore_scaleup_hint == FREQ_HIGHEST) {
+                               if (rte_power_freq_max)
+                                       rte_power_freq_max(lcore_id);
+                       } else if (lcore_scaleup_hint == FREQ_HIGHER) {
+                               if (rte_power_freq_up)
+                                       rte_power_freq_up(lcore_id);
+                       }
                } else {
                        /**
                         * All Rx queues empty in recent consecutive polls,
@@ -1546,6 +1553,7 @@ main(int argc, char **argv)
        unsigned lcore_id;
        uint64_t hz;
        uint32_t n_tx_queue, nb_lcores;
+       uint32_t dev_rxq_num, dev_txq_num;
        uint8_t portid, nb_rx_queue, queue, socketid;

        /* catch SIGINT and restore cpufreq governor to ondemand */
@@ -1595,10 +1603,18 @@ main(int argc, char **argv)
                printf("Initializing port %d ... ", portid );
                fflush(stdout);

+               rte_eth_dev_info_get(portid, &dev_info);
+               dev_rxq_num = dev_info.max_rx_queues;
+               dev_txq_num = dev_info.max_tx_queues;
+
                nb_rx_queue = get_port_n_rx_queues(portid);
+               if (nb_rx_queue > dev_rxq_num)
+                       rte_exit(EXIT_FAILURE, "Cannot configure non-existent rxq: "
+                                       "port=%d\n", portid);
+
                n_tx_queue = nb_lcores;
-               if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
-                       n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+               if (n_tx_queue > dev_txq_num)
+                       n_tx_queue = dev_txq_num;
                printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
                        nb_rx_queue, (unsigned)n_tx_queue );
                ret = rte_eth_dev_configure(portid, nb_rx_queue,
@@ -1622,6 +1638,9 @@ main(int argc, char **argv)
                        if (rte_lcore_is_enabled(lcore_id) == 0)
                                continue;

+                       if (queueid >= dev_txq_num)
+                               continue;
+
                        if (numa_on)
                                socketid = \
                                (uint8_t)rte_lcore_to_socket_id(lcore_id);
@@ -1656,8 +1675,9 @@ main(int argc, char **argv)
                /* init power management library */
                ret = rte_power_init(lcore_id);
                if (ret)
-                       rte_exit(EXIT_FAILURE, "Power management library "
-                               "initialization failed on core%u\n", lcore_id);
+                       rte_log(RTE_LOG_ERR, RTE_LOGTYPE_POWER,
+                               "Power management library initialization "
+                               "failed on core%u\n", lcore_id);

                /* init timer structures for each enabled lcore */
                rte_timer_init(&power_timers[lcore_id]);
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 1be4ba7..e4de20e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -223,7 +223,7 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
 /* enable MSI interrupts */
 static int
 vfio_enable_msi(struct rte_intr_handle *intr_handle) {
-       int len, ret;
+       int len, ret, max_intr;
        char irq_set_buf[IRQ_SET_BUF_LEN];
        struct vfio_irq_set *irq_set;
        int *fd_ptr;
@@ -232,12 +232,19 @@ vfio_enable_msi(struct rte_intr_handle *intr_handle) {

        irq_set = (struct vfio_irq_set *) irq_set_buf;
        irq_set->argsz = len;
-       irq_set->count = 1;
+       if ((!intr_handle->max_intr) ||
+               (intr_handle->max_intr > VFIO_MAX_QUEUE_ID))
+               max_intr = VFIO_MAX_QUEUE_ID + 1;
+       else
+               max_intr = intr_handle->max_intr;
+
+       irq_set->count = max_intr;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
        irq_set->start = 0;
        fd_ptr = (int *) &irq_set->data;
-       *fd_ptr = intr_handle->fd;
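+       /* queue eventfds occupy vectors 0..max_intr-2; the last vector is
+        * reserved for the misc (mailbox/link) interrupt fd */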
+       memcpy(fd_ptr, intr_handle->queue_fd, sizeof(intr_handle->queue_fd));
+       fd_ptr[max_intr - 1] = intr_handle->fd;

        ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

@@ -246,23 +253,6 @@ vfio_enable_msi(struct rte_intr_handle *intr_handle) {
                                                intr_handle->fd);
                return -1;
        }
-
-       /* manually trigger interrupt to enable it */
-       memset(irq_set, 0, len);
-       len = sizeof(struct vfio_irq_set);
-       irq_set->argsz = len;
-       irq_set->count = 1;
-       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
-       irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
-       irq_set->start = 0;
-
-       ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
-       if (ret) {
-               RTE_LOG(ERR, EAL, "Error triggering MSI interrupts for fd %d\n",
-                                               intr_handle->fd);
-               return -1;
-       }
        return 0;
 }

@@ -294,7 +284,7 @@ vfio_disable_msi(struct rte_intr_handle *intr_handle) {
 /* enable MSI-X interrupts */
 static int
 vfio_enable_msix(struct rte_intr_handle *intr_handle) {
-       int len, ret;
+       int len, ret, max_intr;
        char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
        struct vfio_irq_set *irq_set;
        int *fd_ptr;
@@ -303,13 +293,19 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {

        irq_set = (struct vfio_irq_set *) irq_set_buf;
        irq_set->argsz = len;
-       irq_set->count = VFIO_MAX_QUEUE_ID + 1;
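+       /* fall back to the full vector count when the driver did not set
+        * max_intr or asked for more than the queue fd array holds */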
+       if ((!intr_handle->max_intr) ||
+               (intr_handle->max_intr > VFIO_MAX_QUEUE_ID))
+               max_intr = VFIO_MAX_QUEUE_ID + 1;
+       else
+               max_intr = intr_handle->max_intr;
+
+       irq_set->count = max_intr;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = 0;
        fd_ptr = (int *) &irq_set->data;
        memcpy(fd_ptr, intr_handle->queue_fd, sizeof(intr_handle->queue_fd));
-       fd_ptr[VFIO_MAX_QUEUE_ID] = intr_handle->fd;
+       fd_ptr[max_intr - 1] = intr_handle->fd;

        ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

@@ -319,23 +315,6 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
                return -1;
        }

-       /* manually trigger interrupt to enable it */
-       memset(irq_set, 0, len);
-       len = sizeof(struct vfio_irq_set);
-       irq_set->argsz = len;
-       irq_set->count = 1;
-       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
-       irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
-       irq_set->start = VFIO_MAX_QUEUE_ID;
-
-       ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
-       if (ret) {
-               RTE_LOG(ERR, EAL, "Error triggering MSI-X interrupts for fd %d\n",
-                                               intr_handle->fd);
-               return -1;
-       }
-
        return 0;
 }

@@ -913,10 +892,28 @@ rte_eal_wait_rx_intr(uint8_t port_id, uint8_t queue_id)
        rte_spinlock_lock(&intr_lock);

        ev.events = EPOLLIN | EPOLLPRI;
-       ev.data.fd = intr_handle.queue_fd[queue_id];
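+       /* UIO exposes a single interrupt fd; VFIO provides one eventfd
+        * per rx queue */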
+       switch (intr_handle.type) {
+       case RTE_INTR_HANDLE_UIO:
+               ev.data.fd = intr_handle.fd;
+               break;
+#ifdef VFIO_PRESENT
+       case RTE_INTR_HANDLE_VFIO_MSIX:
+       case RTE_INTR_HANDLE_VFIO_MSI:
+       case RTE_INTR_HANDLE_VFIO_LEGACY:
+               ev.data.fd = intr_handle.queue_fd[queue_id];
+               break;
+#endif
+       default:
+               rte_spinlock_unlock(&intr_lock);
+               close(pfd);
+               return -1;
+       }

-       if (epoll_ctl(pfd, EPOLL_CTL_ADD,
-                               intr_handle.queue_fd[queue_id], &ev) < 0){
+       if (epoll_ctl(pfd, EPOLL_CTL_ADD, ev.data.fd, &ev) < 0) {
                rte_panic("Error adding fd %d epoll_ctl, %s\n",
-                               intr_handle.queue_fd[queue_id], strerror(errno));
+                               ev.data.fd, strerror(errno));
        } else
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
index 83b717c..c6982cf 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
@@ -54,6 +54,7 @@ enum rte_intr_handle_type {
 struct rte_intr_handle {
        int vfio_dev_fd;                 /**< VFIO device file descriptor */
        int fd;                          /**< file descriptor */
+       int max_intr;                    /**< maximum number of interrupt vectors requested */
        int queue_fd[VFIO_MAX_QUEUE_ID]; /**< rx and tx queue interrupt file descriptors */
        enum rte_intr_handle_type type;  /**< handle type */
 };
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 1d694c5..368e4db 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -187,11 +187,14 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -199,8 +202,15 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
+static void ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction, u8 queue, u8 msix_vector);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+               void *param);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+static void ixgbevf_configure_msix(struct ixgbe_hw *hw);

 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -416,8 +426,11 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
+       .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
+       .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
 };
@@ -912,6 +925,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

+       /* set the maximum number of interrupt vectors requested via VFIO */
+       pci_dev->intr_handle.max_intr = hw->mac.max_rx_queues + IXGBE_MAX_OTHER_INTR;
+
        rte_intr_callback_register(&(pci_dev->intr_handle),
                ixgbe_dev_interrupt_handler, (void *)eth_dev);

@@ -1088,6 +1104,14 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        return (-EIO);
        }

+       /* set the maximum number of interrupt vectors requested via VFIO */
+       pci_dev->intr_handle.max_intr = hw->mac.max_rx_queues + IXGBEVF_MAX_OTHER_INTR;
+
+       rte_intr_callback_register(&(pci_dev->intr_handle),
+               ixgbevf_dev_interrupt_handler, (void *)eth_dev);
+
+       rte_intr_enable(&(pci_dev->intr_handle));
+
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -2286,6 +2310,30 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        return 0;
 }

+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t eicr;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* clear all cause mask */
+       ixgbevf_intr_disable(hw);
+
+       /* read-on-clear nic registers here */
+       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+       intr->flags = 0;
+
+       /* set flag for async link update */
+       if (eicr & IXGBE_EICR_LSC)
+               intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+       return 0;
+}
+
 /**
  * It gets and then prints the link status.
  *
@@ -2381,6 +2429,18 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        return 0;
 }

+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_DRV_LOG(DEBUG, "enable intr immediately");
+       ixgbevf_intr_enable(hw);
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+       return 0;
+}
+
 /**
  * Interrupt handler which shall be registered for alarm callback for delayed
  * handling specific interrupt to wait for the stable nic state. As the
@@ -2442,6 +2502,15 @@ ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
        ixgbe_dev_interrupt_action(dev);
 }

+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                                                       void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       ixgbevf_dev_interrupt_get_status(dev);
+       ixgbevf_dev_interrupt_action(dev);
+}
+
 static int
 ixgbe_dev_led_on(struct rte_eth_dev *dev)
 {
@@ -2940,6 +3009,19 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 }

+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       /* VF enable interrupt autoclean */
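+       /* VTEIAM/VTEIAC control auto mask/clear behaviour; VTEIMS unmasks
+        * the interrupt causes */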
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -3002,6 +3084,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)

        ixgbevf_dev_rxtx_start(dev);

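+       /* map VF rx queues and the other (mailbox) cause to MSI-X vectors */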
+       ixgbevf_configure_msix(hw);
+
+       /* Re-enable interrupt for VF */
+       ixgbevf_intr_enable(hw);
+
        return 0;
 }

@@ -3539,6 +3626,33 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }

+
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask |= (1 << queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+       return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask &= ~(1 << queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+       return 0;
+}
+
 static int
 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -3591,6 +3705,29 @@ ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        return 0;
 }

+static void
+ixgbevf_set_ivar(struct ixgbe_hw *hw, s8 direction,
+                       u8 queue, u8 msix_vector)
+{
+       u32 ivar, index;
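+       /* each VTIVAR register holds the rx and tx vectors for two queues,
+        * 8 bits per entry; VTIVAR_MISC holds the other-cause vector */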
+       if (direction == -1) {
+               /* other causes */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+               ivar &= ~0xFF;
+               ivar |= msix_vector;
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+       } else {
+               /* rx or tx cause */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               index = ((16 * (queue & 1)) + (8 * direction));
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+               ivar &= ~(0xFF << index);
+               ivar |= (msix_vector << index);
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+       }
+}
+
 /**
  * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
  * @hw: pointer to ixgbe_hw struct
@@ -3640,6 +3777,22 @@ ixgbe_set_ivar(struct ixgbe_hw *hw, s8 direction,
        }
 }

+
+static void
+ixgbevf_configure_msix(struct ixgbe_hw *hw)
+{
+       u32 queue_idx, vector_idx;
+       /* Configure all RX queues of VF */
+       for (vector_idx = 0; vector_idx < IXGBE_VF_MAXMSIVECTOR; vector_idx++) {
+               for (queue_idx = 0; queue_idx < (hw->mac.max_rx_queues - 1); queue_idx++)
+                       ixgbevf_set_ivar(hw, 0, queue_idx, vector_idx);
+       }
+
+       /* Configure VF other cause (mailbox) ivar */
+       ixgbevf_set_ivar(hw, -1, 1, vector_idx);
+}
+
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @hw: board private structure
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 730098d..b0221cf 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -98,6 +98,11 @@
 #define IXGBE_5TUPLE_MAX_PRI            7
 #define IXGBE_5TUPLE_MIN_PRI            1

+#define IXGBE_VF_IRQ_ENABLE_MASK        3  /* vf interrupt enable mask */
+#define IXGBE_VF_MAXMSIVECTOR           1
+/* maximum other interrupts besides rx and tx */
+#define IXGBE_MAX_OTHER_INTR            1
+#define IXGBEVF_MAX_OTHER_INTR          1
 /*
  * Information about the fdir mode.
  */
@@ -247,6 +252,7 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

 int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);

 int ixgbe_dev_rx_init(struct rte_eth_dev *dev);

-- 
1.8.1.4
