This patch configures VFs with a random MAC address if none is provided
by the PF via the bulletin board. It also adds the bulletin APIs required
by the PF-PMD driver to communicate link properties/changes to the VFs
through the bulletin update mechanism.
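For review convenience, a minimal standalone sketch of the random-MAC
fallback follows; it mirrors qede_generate_random_mac_addr() in the diff
below. The helper name is illustrative only, and nothing beyond DPDK's
rte_rand() and RTE_ETHER_LOCAL_ADMIN_ADDR is assumed. Generated addresses
take the form 02:09:c0:xx:xx:xx, since the locally-administered bit (0x02)
is OR'ed into the first byte of the 00:09:C0 OUI while the multicast bit
stays clear, keeping the address unicast.

    #include <stdint.h>
    #include <string.h>
    #include <rte_ether.h>
    #include <rte_random.h>

    /* Illustrative helper (not part of the patch): build a locally
     * administered unicast MAC with a fixed OUI prefix and three
     * random trailing bytes taken from rte_rand().
     */
    static void sketch_random_vf_mac(struct rte_ether_addr *mac)
    {
            uint64_t rnd = rte_rand();

            mac->addr_bytes[0] = 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR;
            mac->addr_bytes[1] = 0x09;
            mac->addr_bytes[2] = 0xC0;
            memcpy(&mac->addr_bytes[3], &rnd, 3);
    }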
With these changes, a VF-PMD instance is able to run the fastpath over
a PF-PMD driver instance.

Signed-off-by: Manish Chopra <mani...@marvell.com>
Signed-off-by: Igor Russkikh <irussk...@marvell.com>
Signed-off-by: Rasesh Mody <rm...@marvell.com>
---
 drivers/net/qede/qede_ethdev.c | 34 ++++++++++++++++++++-
 drivers/net/qede/qede_main.c   |  7 ++++-
 drivers/net/qede/qede_sriov.c  | 55 ++++++++++++++++++++++++++++++++++
 drivers/net/qede/qede_sriov.h  |  1 +
 4 files changed, 95 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 210a3b10f..e785f3fb0 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2479,6 +2479,24 @@ static void qede_update_pf_params(struct ecore_dev *edev)
 	qed_ops->common->update_pf_params(edev, &pf_params);
 }
 
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+	uint64_t random;
+
+	/* Set Organizationally Unique Identifier (OUI) prefix. */
+	mac_addr->addr_bytes[0] = 0x00;
+	mac_addr->addr_bytes[1] = 0x09;
+	mac_addr->addr_bytes[2] = 0xC0;
+
+	/* Force indication of locally assigned MAC address. */
+	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+	/* Generate the last 3 bytes of the MAC address with a random number. */
+	random = rte_rand();
+
+	memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 {
 	struct rte_pci_device *pci_dev;
@@ -2491,7 +2509,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	uint8_t bulletin_change;
 	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
 	uint8_t is_mac_forced;
-	bool is_mac_exist;
+	bool is_mac_exist = false;
 	/* Fix up ecore debug level */
 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
@@ -2669,6 +2687,20 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 				DP_ERR(edev, "No VF macaddr assigned\n");
 			}
 		}
+
+		/* If MAC doesn't exist from PF, generate random one */
+		if (!is_mac_exist) {
+			struct rte_ether_addr *mac_addr;
+
+			mac_addr = (struct rte_ether_addr *)&vf_mac;
+			qede_generate_random_mac_addr(mac_addr);
+
+			rte_ether_addr_copy(mac_addr,
+					    &eth_dev->data->mac_addrs[0]);
+
+			rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+					    &adapter->primary_mac);
+		}
 	}
 
 	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 0afacc064..805a95e3c 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -651,10 +651,15 @@ void qed_link_update(struct ecore_hwfn *hwfn)
 	struct ecore_dev *edev = hwfn->p_dev;
 	struct qede_dev *qdev = (struct qede_dev *)edev;
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+	int rc;
+
+	rc = qede_link_update(dev, 0);
+	qed_inform_vf_link_state(hwfn);
 
-	if (!qede_link_update(dev, 0))
+	if (!rc) {
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
 					      NULL);
+	}
 }
 
 static int qed_drain(struct ecore_dev *edev)
diff --git a/drivers/net/qede/qede_sriov.c b/drivers/net/qede/qede_sriov.c
index 6d620dde8..93f7a2a55 100644
--- a/drivers/net/qede/qede_sriov.c
+++ b/drivers/net/qede/qede_sriov.c
@@ -126,6 +126,28 @@ static void qed_handle_vf_msg(struct ecore_hwfn *hwfn)
 	ecore_ptt_release(hwfn, ptt);
 }
 
+static void qed_handle_bulletin_post(struct ecore_hwfn *hwfn)
+{
+	struct ecore_ptt *ptt;
+	int i;
+
+	ptt = ecore_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_NOTICE(hwfn, true, "PTT acquire failed\n");
+		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+		return;
+	}
+
+	/* TODO - at the moment update bulletin board of all VFs.
+	 * if this proves to costly, we can mark VFs that need their
+	 * bulletins updated.
+	 */
+	ecore_for_each_vf(hwfn, i)
+		ecore_iov_post_vf_bulletin(hwfn, i, ptt);
+
+	ecore_ptt_release(hwfn, ptt);
+}
+
 void qed_iov_pf_task(void *arg)
 {
 	struct ecore_hwfn *p_hwfn = arg;
@@ -134,6 +156,13 @@ void qed_iov_pf_task(void *arg)
 		OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
 		qed_handle_vf_msg(p_hwfn);
 	}
+
+	if (OSAL_GET_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+			 &p_hwfn->iov_task_flags)) {
+		OSAL_CLEAR_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+			       &p_hwfn->iov_task_flags);
+		qed_handle_bulletin_post(p_hwfn);
+	}
 }
 
 int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
@@ -144,3 +173,29 @@ int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
 	OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
 	return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
 }
+
+void qed_inform_vf_link_state(struct ecore_hwfn *hwfn)
+{
+	struct ecore_hwfn *lead_hwfn = ECORE_LEADING_HWFN(hwfn->p_dev);
+	struct ecore_mcp_link_capabilities caps;
+	struct ecore_mcp_link_params params;
+	struct ecore_mcp_link_state link;
+	int i;
+
+	if (!hwfn->pf_iov_info)
+		return;
+
+	rte_memcpy(&params, ecore_mcp_get_link_params(lead_hwfn),
+		   sizeof(params));
+	rte_memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
+	rte_memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
+		   sizeof(caps));
+
+	/* Update bulletin of all future possible VFs with link configuration */
+	for (i = 0; i < hwfn->p_dev->p_iov_info->total_vfs; i++) {
+		ecore_iov_set_link(hwfn, i,
+				   &params, &link, &caps);
+	}
+
+	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
diff --git a/drivers/net/qede/qede_sriov.h b/drivers/net/qede/qede_sriov.h
index 8b7fa7daa..e58ecc2a5 100644
--- a/drivers/net/qede/qede_sriov.h
+++ b/drivers/net/qede/qede_sriov.h
@@ -17,5 +17,6 @@ enum qed_iov_wq_flag {
 	QED_IOV_WQ_DB_REC_HANDLER,
 };
 
+void qed_inform_vf_link_state(struct ecore_hwfn *hwfn);
 int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag);
 void qed_iov_pf_task(void *arg);
-- 
2.17.1