From: Junfeng Guo <junfeng....@intel.com>

Add the dev_start, dev_stop and link_update device operations (dev ops) for the idpf PMD.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
Signed-off-by: Junfeng Guo <junfeng....@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 55 ++++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.c   | 20 +++++++++++++
 2 files changed, 75 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index fb5cd1b111..621bf9aad5 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,22 @@ static const char * const idpf_valid_args[] = {
        NULL
 };
 
+static int
+idpf_dev_link_update(struct rte_eth_dev *dev,
+                    __rte_unused int wait_to_complete)
+{
+       struct rte_eth_link new_link;
+
+       memset(&new_link, 0, sizeof(new_link));
+
+       new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+       new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                                 RTE_ETH_LINK_SPEED_FIXED);
+
+       return rte_eth_linkstatus_set(dev, &new_link);
+}
+
 static int
 idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -267,6 +283,42 @@ idpf_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+idpf_dev_start(struct rte_eth_dev *dev)
+{
+       struct idpf_vport *vport = dev->data->dev_private;
+       int ret;
+
+       if (dev->data->mtu > vport->max_mtu) {
+               PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+               return -EINVAL;
+       }
+
+       vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
+
+       /* TODO: start queues */
+
+       ret = idpf_vc_ena_dis_vport(vport, true);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "Failed to enable vport");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+idpf_dev_stop(struct rte_eth_dev *dev)
+{
+       struct idpf_vport *vport = dev->data->dev_private;
+
+       idpf_vc_ena_dis_vport(vport, false);
+
+       /* TODO: stop queues */
+
+       return 0;
+}
+
 static int
 idpf_dev_close(struct rte_eth_dev *dev)
 {
@@ -656,6 +708,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
        .rx_queue_setup                 = idpf_rx_queue_setup,
        .tx_queue_setup                 = idpf_tx_queue_setup,
        .dev_infos_get                  = idpf_dev_info_get,
+       .dev_start                      = idpf_dev_start,
+       .dev_stop                       = idpf_dev_stop,
+       .link_update                    = idpf_dev_link_update,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 25dd5d85d5..3528d2f2c7 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -334,6 +334,11 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
                return -EINVAL;
 
+       if (rx_conf->rx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+               return -EINVAL;
+       }
+
        /* Setup Rx description queue */
        rxq = rte_zmalloc_socket("idpf rxq",
                                 sizeof(struct idpf_rx_queue),
@@ -465,6 +470,11 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
                return -EINVAL;
 
+       if (rx_conf->rx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+               return -EINVAL;
+       }
+
        /* Setup Rx description queue */
        rxq = rte_zmalloc_socket("idpf rxq",
                                 sizeof(struct idpf_rx_queue),
@@ -569,6 +579,11 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                return -EINVAL;
 
+       if (tx_conf->tx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+               return -EINVAL;
+       }
+
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("idpf split txq",
                                 sizeof(struct idpf_tx_queue),
@@ -691,6 +706,11 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
                return -EINVAL;
 
+       if (tx_conf->tx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+               return -EINVAL;
+       }
+
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("idpf txq",
                                 sizeof(struct idpf_tx_queue),
-- 
2.26.2

Reply via email to