Internal Rx interrupt/LSC/adminq share the same source, which wastes lots of CPU cycles during packet reception. So remove the PF interrupt handler, just like the i40e VF.
Signed-off-by: Beilei Xing <[email protected]> --- drivers/net/i40e/i40e_ethdev.c | 64 +++++++++++------------------------------- 1 file changed, 17 insertions(+), 47 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index a340540..618b8d4 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -289,7 +289,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw, uint64_t *offset, uint64_t *stat); static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); -static void i40e_dev_interrupt_handler(void *param); +static void i40e_dev_alarm_handler(void *param); static int i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base, uint32_t num); static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool); @@ -1189,11 +1189,12 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw, return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details); } +#define I40E_ALARM_INTERVAL 50000 /* us */ + static int eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) { struct rte_pci_device *pci_dev; - struct rte_intr_handle *intr_handle; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *vsi; @@ -1218,7 +1219,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) } i40e_set_default_ptype_table(dev); pci_dev = RTE_ETH_DEV_TO_PCI(dev); - intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); @@ -1450,16 +1450,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* initialize pf host driver to setup SRIOV resource if applicable */ i40e_pf_host_init(dev); - /* register callback func to eal lib */ - rte_intr_callback_register(intr_handle, - i40e_dev_interrupt_handler, dev); - /* configure and enable device interrupt */ i40e_pf_config_irq0(hw, TRUE); i40e_pf_enable_irq0(hw); - /* enable uio 
intr after callback register */ - rte_intr_enable(intr_handle); + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); /* By default disable flexible payload in global configuration */ if (!pf->support_multi_driver) @@ -1604,14 +1600,11 @@ static int eth_i40e_dev_uninit(struct rte_eth_dev *dev) { struct i40e_pf *pf; - struct rte_pci_device *pci_dev; - struct rte_intr_handle *intr_handle; struct i40e_hw *hw; struct i40e_filter_control_settings settings; struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; - int retries = 0; PMD_INIT_FUNC_TRACE(); @@ -1620,8 +1613,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_ETH_DEV_TO_PCI(dev); - intr_handle = &pci_dev->intr_handle; ret = rte_eth_switch_domain_free(pf->switch_domain_id); if (ret) @@ -1654,23 +1645,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) rte_free(dev->data->mac_addrs); dev->data->mac_addrs = NULL; - /* disable uio intr before callback unregister */ - rte_intr_disable(intr_handle); - - /* unregister callback func to eal lib */ - do { - ret = rte_intr_callback_unregister(intr_handle, - i40e_dev_interrupt_handler, dev); - if (ret >= 0) { - break; - } else if (ret != -EAGAIN) { - PMD_INIT_LOG(ERR, - "intr callback unregister failed: %d", - ret); - return ret; - } - i40e_msec_delay(500); - } while (retries++ < 5); + rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); i40e_rm_ethtype_filter_list(pf); i40e_rm_tunnel_filter_list(pf); @@ -2161,7 +2136,8 @@ i40e_dev_start(struct rte_eth_dev *dev) return -EINVAL; } - rte_intr_disable(intr_handle); + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_disable(intr_handle); if ((rte_intr_cap_multiple(intr_handle) || !RTE_ETH_DEV_SRIOV(dev).active) && @@ -2259,9 +2235,8 @@ i40e_dev_start(struct rte_eth_dev *dev) } if (!rte_intr_allow_others(intr_handle)) { - rte_intr_callback_unregister(intr_handle, - 
i40e_dev_interrupt_handler, - (void *)dev); + rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); + /* configure and enable device interrupt */ i40e_pf_config_irq0(hw, FALSE); i40e_pf_enable_irq0(hw); @@ -2281,8 +2256,8 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_dev_link_update(dev, 0); } - /* enable uio intr after callback register */ - rte_intr_enable(intr_handle); + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_enable(intr_handle); i40e_filter_restore(pf); @@ -2334,12 +2309,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* Set link down */ i40e_dev_set_link_down(dev); - if (!rte_intr_allow_others(intr_handle)) - /* resume to the default handler */ - rte_intr_callback_register(intr_handle, - i40e_dev_interrupt_handler, - (void *)dev); - /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); if (intr_handle->intr_vec) { @@ -2392,7 +2361,8 @@ i40e_dev_close(struct rte_eth_dev *dev) /* Disable interrupt */ i40e_pf_disable_irq0(hw); - rte_intr_disable(intr_handle); + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_disable(intr_handle); i40e_fdir_teardown(pf); @@ -6488,7 +6458,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) * void */ static void -i40e_dev_interrupt_handler(void *param) +i40e_dev_alarm_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -6532,7 +6502,8 @@ i40e_dev_interrupt_handler(void *param) done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); - rte_intr_enable(dev->intr_handle); + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); } int @@ -11419,7 +11390,6 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); - rte_intr_enable(&pci_dev->intr_handle); return 0; } -- 2.5.5

