ethdev:
 - make the changes needed for drivers to start using the new rx_pkt_burst() API.
 - provide helper functions/macros to wrap existing PMD rx_burst functions.
 - remove rx_pkt_burst() from 'struct rte_eth_dev'.
drivers/net:
 - adjust the i40e and ice drivers to the new rx_burst API.

Signed-off-by: Konstantin Ananyev <konstantin.anan...@intel.com>
---
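Notes:

A minimal, hypothetical sketch of the driver-side conversion this patch
expects (the "mypmd" names are invented for illustration; only the
_RTE_ETH_* macros, the _rte_eth_rx_prolog()/_rte_eth_rx_epilog() helpers
and rte_eth_set_rx_burst() come from this patch, and the usual driver
includes are assumed):

  #include <ethdev_driver.h>

  /* Existing rx_burst routine keeps the old (void *rxq, ...) signature. */
  static uint16_t
  mypmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
  {
          RTE_SET_USED(rx_queue);
          RTE_SET_USED(rx_pkts);
          RTE_SET_USED(nb_pkts);
          return 0; /* descriptor processing would go here */
  }

  /*
   * Expands into uint16_t _rte_eth_mypmd_recv_pkts(port_id, queue_id,
   * rx_pkts, nb_pkts): it resolves the queue pointer via
   * _rte_eth_rx_prolog(), calls mypmd_recv_pkts() and then runs the RX
   * callbacks/tracing in _rte_eth_rx_epilog().
   */
  static _RTE_ETH_RX_DEF(mypmd_recv_pkts)

  /* Registered at init time instead of assigning dev->rx_pkt_burst. */
  static int
  mypmd_dev_init(struct rte_eth_dev *dev)
  {
          return rte_eth_set_rx_burst(dev->data->port_id,
                          _RTE_ETH_FUNC(mypmd_recv_pkts));
  }
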
 app/test/virtual_pmd.c                   |  15 ++-
 drivers/net/i40e/i40e_ethdev.c           |   2 +-
 drivers/net/i40e/i40e_ethdev_vf.c        |   3 +-
 drivers/net/i40e/i40e_rxtx.c             | 161 ++++++++++++++++-------
 drivers/net/i40e/i40e_rxtx.h             |  36 +++--
 drivers/net/i40e/i40e_rxtx_vec_avx2.c    |   7 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c  |   8 +-
 drivers/net/i40e/i40e_rxtx_vec_sse.c     |   8 +-
 drivers/net/i40e/i40e_vf_representor.c   |   5 +-
 drivers/net/ice/ice_dcf_ethdev.c         |   5 +-
 drivers/net/ice/ice_dcf_vf_representor.c |   5 +-
 drivers/net/ice/ice_ethdev.c             |   2 +-
 drivers/net/ice/ice_rxtx.c               | 160 +++++++++++++++-------
 drivers/net/ice/ice_rxtx.h               |  44 +++----
 drivers/net/ice/ice_rxtx_vec_avx2.c      |  16 ++-
 drivers/net/ice/ice_rxtx_vec_avx512.c    |  16 ++-
 drivers/net/ice/ice_rxtx_vec_sse.c       |   8 +-
 lib/ethdev/ethdev_driver.h               | 120 +++++++++++++++++
 lib/ethdev/rte_ethdev.c                  |  23 +++-
 lib/ethdev/rte_ethdev.h                  |  39 +-----
 lib/ethdev/rte_ethdev_core.h             |   9 +-
 lib/ethdev/version.map                   |   5 +
 22 files changed, 483 insertions(+), 214 deletions(-)
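
For reference, a sketch of the other side of the change, continuing the
hypothetical "mypmd" example above (only rte_eth_rx_burst(),
rte_eth_get_rx_burst(), rte_eth_burst_api and _RTE_ETH_FUNC() come from
this series): the application call site stays source-compatible, it now
dispatches through rte_eth_burst_api[port_id].rx_pkt_burst instead of
dev->rx_pkt_burst, and driver code that used to compare dev->rx_pkt_burst
pointers now compares the wrapper returned by rte_eth_get_rx_burst().

  /* Application side: unchanged call. */
  static uint16_t
  poll_port(uint16_t port_id, uint16_t queue_id)
  {
          struct rte_mbuf *pkts[32];
          uint16_t i, nb;

          nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
          for (i = 0; i != nb; i++)
                  rte_pktmbuf_free(pkts[i]); /* real code would process them */
          return nb;
  }

  /* Driver side: burst-function comparison goes through the accessor. */
  static int
  mypmd_rx_is_scalar(uint16_t port_id)
  {
          return rte_eth_get_rx_burst(port_id) ==
                          _RTE_ETH_FUNC(mypmd_recv_pkts);
  }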

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed..734ef32c97 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -348,6 +348,8 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
        return rx_count;
 }
 
+static _RTE_ETH_RX_DEF(virtual_ethdev_rx_burst_success)
+
 static uint16_t
 virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
                                                         struct rte_mbuf **bufs __rte_unused,
@@ -356,6 +358,8 @@ virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
        return 0;
 }
 
+static _RTE_ETH_RX_DEF(virtual_ethdev_rx_burst_fail)
+
 static uint16_t
 virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
@@ -425,12 +429,12 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
 void
 virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
 {
-       struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
-
        if (success)
-               vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
+               rte_eth_set_rx_burst(port_id,
+                       _RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
        else
-               vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
+               rte_eth_set_rx_burst(port_id,
+                       _RTE_ETH_FUNC(virtual_ethdev_rx_burst_fail));
 }
 
 
@@ -599,7 +603,8 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
        pci_dev->device.driver = &pci_drv->driver;
        eth_dev->device = &pci_dev->device;
 
-       eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
+       rte_eth_set_rx_burst(eth_dev->data->port_id,
+                       _RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
        eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
 
        rte_eth_dev_probing_finish(eth_dev);
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b230e2ed1..4753af126d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1437,7 +1437,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
        dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
        dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
-       dev->rx_pkt_burst = i40e_recv_pkts;
+       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
        dev->tx_pkt_burst = i40e_xmit_pkts;
        dev->tx_pkt_prepare = i40e_prep_pkts;
 
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 0cfe13b7b2..e08e97276a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1576,7 +1576,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->rx_descriptor_done   = i40e_dev_rx_descriptor_done;
        eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
        eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
-       eth_dev->rx_pkt_burst = &i40e_recv_pkts;
+       rte_eth_set_rx_burst(eth_dev->data->port_id,
+               _RTE_ETH_FUNC(i40e_recv_pkts));
        eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
 
        /*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 026cda948c..f2d0d35538 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -700,7 +700,9 @@ i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
 }
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 
-uint16_t
+static _RTE_ETH_RX_DEF(i40e_recv_pkts_bulk_alloc)
+
+static uint16_t
 i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        struct i40e_rx_queue *rxq;
@@ -822,7 +824,9 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        return nb_rx;
 }
 
-uint16_t
+_RTE_ETH_RX_DEF(i40e_recv_pkts)
+
+static uint16_t
 i40e_recv_scattered_pkts(void *rx_queue,
                         struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
@@ -1000,6 +1004,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
        return nb_rx;
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts)
+
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
 i40e_calc_context_desc(uint64_t flags)
@@ -1843,19 +1849,21 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (dev->rx_pkt_burst == i40e_recv_pkts ||
+       rte_eth_rx_burst_t rx_burst = rte_eth_get_rx_burst(dev->data->port_id);
+
+       if (rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts) ||
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-           dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc) ||
 #endif
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-           dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts) ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec) ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec) ||
 #ifdef CC_AVX512_SUPPORT
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 ||
-           dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx512) ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512) ||
 #endif
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
-           dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx2) ||
+           rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2))
                return ptypes;
        return NULL;
 }
@@ -3265,6 +3273,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        uint16_t rx_using_sse, i;
+       rte_eth_rx_burst_t rx_burst;
+
        /* In order to allow Vector Rx there are a few configuration
         * conditions to be met and Rx Bulk Allocation should be allowed.
         */
@@ -3309,17 +3319,22 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                PMD_DRV_LOG(NOTICE,
                                        "Using AVX512 Vector Scattered Rx (port %d).",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       i40e_recv_scattered_pkts_vec_avx512;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                       _RTE_ETH_FUNC(
+                                       i40e_recv_scattered_pkts_vec_avx512));
+                                       
 #endif
                        } else {
                                PMD_INIT_LOG(DEBUG,
                                        "Using %sVector Scattered Rx (port %d).",
                                        ad->rx_use_avx2 ? "avx2 " : "",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst = ad->rx_use_avx2 ?
-                                       i40e_recv_scattered_pkts_vec_avx2 :
-                                       i40e_recv_scattered_pkts_vec;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                        ad->rx_use_avx2 ?
+                                       _RTE_ETH_FUNC(
+                                        i40e_recv_scattered_pkts_vec_avx2) :
+                                        _RTE_ETH_FUNC(
+                                                i40e_recv_scattered_pkts_vec));
                        }
                } else {
                        if (ad->rx_use_avx512) {
@@ -3327,17 +3342,19 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                PMD_DRV_LOG(NOTICE,
                                        "Using AVX512 Vector Rx (port %d).",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst =
-                                       i40e_recv_pkts_vec_avx512;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                       _RTE_ETH_FUNC(
+                                               i40e_recv_pkts_vec_avx512));
 #endif
                        } else {
                                PMD_INIT_LOG(DEBUG,
                                        "Using %sVector Rx (port %d).",
                                        ad->rx_use_avx2 ? "avx2 " : "",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst = ad->rx_use_avx2 ?
-                                       i40e_recv_pkts_vec_avx2 :
-                                       i40e_recv_pkts_vec;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                       ad->rx_use_avx2 ?
+                                       _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2) :
+                                       _RTE_ETH_FUNC(i40e_recv_pkts_vec));
                        }
                }
 #else /* RTE_ARCH_X86 */
@@ -3345,11 +3362,13 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                        PMD_INIT_LOG(DEBUG,
                                     "Using Vector Scattered Rx (port %d).",
                                     dev->data->port_id);
-                       dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+                       rte_eth_set_rx_burst(dev->data->port_id,
+                               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec));
                } else {
                        PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
                                     dev->data->port_id);
-                       dev->rx_pkt_burst = i40e_recv_pkts_vec;
+                       rte_eth_set_rx_burst(dev->data->port_id,
+                               _RTE_ETH_FUNC(i40e_recv_pkts_vec));
                }
 #endif /* RTE_ARCH_X86 */
        } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
@@ -3358,27 +3377,34 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                    "will be used on port=%d.",
                             dev->data->port_id);
 
-               dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+               rte_eth_set_rx_burst(dev->data->port_id,
+                       _RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc));
        } else {
                /* Simple Rx Path. */
                PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
                             dev->data->port_id);
-               dev->rx_pkt_burst = dev->data->scattered_rx ?
-                                       i40e_recv_scattered_pkts :
-                                       i40e_recv_pkts;
+               rte_eth_set_rx_burst(dev->data->port_id,
+                       dev->data->scattered_rx ?
+                       _RTE_ETH_FUNC(i40e_recv_scattered_pkts) :
+                       _RTE_ETH_FUNC(i40e_recv_pkts));
        }
 
+       rx_burst = rte_eth_get_rx_burst(dev->data->port_id);
+
        /* Propagate information about RX function choice through all queues. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rx_using_sse =
-                       (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-                        dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+                       (rx_burst == _RTE_ETH_FUNC(
+                                       i40e_recv_scattered_pkts_vec) ||
+                        rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec) ||
 #ifdef CC_AVX512_SUPPORT
-                        dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 ||
-                        dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
+                        rx_burst == _RTE_ETH_FUNC(
+                                i40e_recv_scattered_pkts_vec_avx512) ||
+                        rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512) ||
 #endif
-                        dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
-                        dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+                        rx_burst == _RTE_ETH_FUNC(
+                                       i40e_recv_scattered_pkts_vec_avx2) ||
+                        rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2));
 
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
@@ -3390,27 +3416,66 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 }
 
 static const struct {
-       eth_rx_burst_t pkt_burst;
+       rte_eth_rx_burst_t pkt_burst;
        const char *info;
 } i40e_rx_burst_infos[] = {
-       { i40e_recv_scattered_pkts,          "Scalar Scattered" },
-       { i40e_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
-       { i40e_recv_pkts,                    "Scalar" },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts),
+               "Scalar Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc),
+               "Scalar Bulk Alloc",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts),
+               "Scalar",
+       },
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-       { i40e_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
-       { i40e_recv_pkts_vec_avx512,           "Vector AVX512" },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx512),
+               "Vector AVX512 Scattered"
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512),
+               "Vector AVX512",
+       },
 #endif
-       { i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
-       { i40e_recv_pkts_vec_avx2,           "Vector AVX2" },
-       { i40e_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
-       { i40e_recv_pkts_vec,                "Vector SSE" },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx2),
+               "Vector AVX2 Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2),
+               "Vector AVX2",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+               "Vector SSE Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_vec),
+               "Vector SSE",
+       },
 #elif defined(RTE_ARCH_ARM64)
-       { i40e_recv_scattered_pkts_vec,      "Vector Neon Scattered" },
-       { i40e_recv_pkts_vec,                "Vector Neon" },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+               "Vector Neon Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_vec),
+               "Vector Neon",
+       },
 #elif defined(RTE_ARCH_PPC_64)
-       { i40e_recv_scattered_pkts_vec,      "Vector AltiVec Scattered" },
-       { i40e_recv_pkts_vec,                "Vector AltiVec" },
+       {
+               _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+               "Vector AltiVec Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(i40e_recv_pkts_vec),
+               "Vector AltiVec",
+       },
 #endif
 };
 
@@ -3418,7 +3483,7 @@ int
 i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                       struct rte_eth_burst_mode *mode)
 {
-       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       rte_eth_rx_burst_t pkt_burst = rte_eth_get_rx_burst(dev->data->port_id);
        int ret = -EINVAL;
        unsigned int i;
 
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5ccf5773e8..beeeaae78d 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -199,12 +199,10 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                            const struct rte_eth_txconf *tx_conf);
 void i40e_dev_rx_queue_release(void *rxq);
 void i40e_dev_tx_queue_release(void *txq);
-uint16_t i40e_recv_pkts(void *rx_queue,
-                       struct rte_mbuf **rx_pkts,
-                       uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts(void *rx_queue,
-                                 struct rte_mbuf **rx_pkts,
-                                 uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts);
+
 uint16_t i40e_xmit_pkts(void *tx_queue,
                        struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
@@ -231,11 +229,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
-uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                           uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
-                                     struct rte_mbuf **rx_pkts,
-                                     uint16_t nb_pkts);
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec);
+
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
@@ -248,19 +244,17 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
 void i40e_set_tx_function(struct rte_eth_dev *dev);
 void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
 void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
-uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-       uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue,
-       struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx2);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx2);
+
 uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);
 int i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
-uint16_t i40e_recv_pkts_vec_avx512(void *rx_queue,
-                                  struct rte_mbuf **rx_pkts,
-                                  uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
-                                            struct rte_mbuf **rx_pkts,
-                                            uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx512);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx512);
+
 uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue,
                                   struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 3b9eef91a9..5c03d16644 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -628,13 +628,15 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
 {
        return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_pkts_vec_avx2)
+
 /*
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -682,7 +684,7 @@ i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                             uint16_t nb_pkts)
 {
@@ -699,6 +701,7 @@ i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec_avx2)
 
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index bd21d64223..96ff3d60c3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -802,13 +802,15 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t nb_pkts)
 {
        return _recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_pkts_vec_avx512)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -857,7 +859,7 @@ i40e_recv_scattered_burst_vec_avx512(void *rx_queue,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
                                    struct rte_mbuf **rx_pkts,
                                    uint16_t nb_pkts)
@@ -876,6 +878,8 @@ i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
                                rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec_avx512)
+
 static __rte_always_inline int
 i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
 {
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index bfa5aff48d..24687984a7 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -598,13 +598,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
  *   numbers of DD bits
  */
-uint16_t
+static inline uint16_t
 i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
 {
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_pkts_vec)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  *
@@ -651,7 +653,7 @@ i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 /**
  * vPMD receive routine that reassembles scattered packets.
  */
-uint16_t
+static inline uint16_t
 i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                             uint16_t nb_pkts)
 {
@@ -674,6 +676,8 @@ i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                                                      nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec)
+
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
                struct rte_mbuf *pkt, uint64_t flags)
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 0481b55381..9d32a5c85d 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -466,6 +466,8 @@ i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
        return 0;
 }
 
+static _RTE_ETH_RX_DEF(i40e_vf_representor_rx_burst)
+
 static uint16_t
 i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
        __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
@@ -501,7 +503,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with the likes of testpmd.
         */
-       ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+       rte_eth_set_rx_burst(ethdev->data->port_id,
+                       _RTE_ETH_FUNC(i40e_vf_representor_rx_burst));
        ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
 
        vf = &pf->vfs[representor->vf_id];
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index cab7c4da87..58a4204621 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -40,6 +40,8 @@ ice_dcf_recv_pkts(__rte_unused void *rx_queue,
        return 0;
 }
 
+static _RTE_ETH_RX_DEF(ice_dcf_recv_pkts)
+
 static uint16_t
 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
                  __rte_unused struct rte_mbuf **bufs,
@@ -1039,7 +1041,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
        struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
 
        eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
+       rte_eth_set_rx_burst(eth_dev->data->port_id,
+                       _RTE_ETH_FUNC(ice_dcf_recv_pkts));
        eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
index 970461f3e9..8136169ebd 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -18,6 +18,8 @@ ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
        return 0;
 }
 
+static _RTE_ETH_RX_DEF(ice_dcf_vf_repr_rx_burst)
+
 static uint16_t
 ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
                         __rte_unused struct rte_mbuf **tx_pkts,
@@ -413,7 +415,8 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
 
        vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops;
 
-       vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
+       rte_eth_set_rx_burst(vf_rep_eth_dev->data->port_id,
+                       _RTE_ETH_FUNC(ice_dcf_vf_repr_rx_burst));
        vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
 
        vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a4cd39c954..4d67a2dddf 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1996,7 +1996,7 @@ ice_dev_init(struct rte_eth_dev *dev)
        dev->rx_queue_count = ice_rx_queue_count;
        dev->rx_descriptor_status = ice_rx_descriptor_status;
        dev->tx_descriptor_status = ice_tx_descriptor_status;
-       dev->rx_pkt_burst = ice_recv_pkts;
+       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts));
        dev->tx_pkt_burst = ice_xmit_pkts;
        dev->tx_pkt_prepare = ice_prep_pkts;
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f047..2cc411d315 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1749,6 +1749,8 @@ ice_recv_pkts_bulk_alloc(void *rx_queue,
        return nb_rx;
 }
 
+static _RTE_ETH_RX_DEF(ice_recv_pkts_bulk_alloc)
+
 static uint16_t
 ice_recv_scattered_pkts(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
@@ -1917,12 +1919,15 @@ ice_recv_scattered_pkts(void *rx_queue,
        return nb_rx;
 }
 
+static _RTE_ETH_RX_DEF(ice_recv_scattered_pkts)
+
 const uint32_t *
 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        const uint32_t *ptypes;
+       rte_eth_rx_burst_t rx_pkt_burst;
 
        static const uint32_t ptypes_os[] = {
                /* refers to ice_get_default_pkt_type() */
@@ -1988,24 +1993,28 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        else
                ptypes = ptypes_os;
 
-       if (dev->rx_pkt_burst == ice_recv_pkts ||
-           dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts)
+       rx_pkt_burst = rte_eth_get_rx_burst(dev->data->port_id);
+
+       if (rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts))
                return ptypes;
 
 #ifdef RTE_ARCH_X86
-       if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
+       if (rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec) ||
 #ifdef CC_AVX512_SUPPORT
-           dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
-           dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512_offload) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512) ||
+           rx_pkt_burst ==
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512_offload) ||
 #endif
-           dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
-           dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
-           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2_offload) ||
+           rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2) ||
+           rx_pkt_burst ==
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2_offload))
                return ptypes;
 #endif
 
@@ -2216,7 +2225,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
        return ICE_SUCCESS;
 }
 
-uint16_t
+static uint16_t
 ice_recv_pkts(void *rx_queue,
              struct rte_mbuf **rx_pkts,
              uint16_t nb_pkts)
@@ -2313,6 +2322,8 @@ ice_recv_pkts(void *rx_queue,
        return nb_rx;
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts)
+
 static inline void
 ice_parse_tunneling_params(uint64_t ol_flags,
                            union ice_tx_offload tx_offload,
@@ -3107,14 +3118,16 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                                        PMD_DRV_LOG(NOTICE,
                                                "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
                                                dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_scattered_pkts_vec_avx512_offload;
+                                       rte_eth_set_rx_burst(dev->data->port_id,
+                                               _RTE_ETH_FUNC(
+                                               ice_recv_scattered_pkts_vec_avx512_offload));
                                } else {
                                        PMD_DRV_LOG(NOTICE,
                                                "Using AVX512 Vector Scattered Rx (port %d).",
                                                dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_scattered_pkts_vec_avx512;
+                                       rte_eth_set_rx_burst(dev->data->port_id,
+                                               _RTE_ETH_FUNC(
+                                               ice_recv_scattered_pkts_vec_avx512));
                                }
 #endif
                        } else if (ad->rx_use_avx2) {
@@ -3122,20 +3135,23 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                                        PMD_DRV_LOG(NOTICE,
                                                    "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
                                                    dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_scattered_pkts_vec_avx2_offload;
+                                       rte_eth_set_rx_burst(dev->data->port_id,
+                                               _RTE_ETH_FUNC(
+                                               ice_recv_scattered_pkts_vec_avx2_offload));
                                } else {
                                        PMD_DRV_LOG(NOTICE,
                                                    "Using AVX2 Vector Scattered Rx (port %d).",
                                                    dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_scattered_pkts_vec_avx2;
+                                       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(
+                                               ice_recv_scattered_pkts_vec_avx2));
                                }
                        } else {
                                PMD_DRV_LOG(DEBUG,
                                        "Using Vector Scattered Rx (port %d).",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                       _RTE_ETH_FUNC(
+                                               ice_recv_scattered_pkts_vec));
                        }
                } else {
                        if (ad->rx_use_avx512) {
@@ -3144,14 +3160,15 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                                        PMD_DRV_LOG(NOTICE,
                                                "Using AVX512 OFFLOAD Vector Rx (port %d).",
                                                dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_pkts_vec_avx512_offload;
+                                       rte_eth_set_rx_burst(dev->data->port_id,
+                                               _RTE_ETH_FUNC(
+                                               ice_recv_pkts_vec_avx512_offload));
                                } else {
                                        PMD_DRV_LOG(NOTICE,
                                                "Using AVX512 Vector Rx (port %d).",
                                                dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_pkts_vec_avx512;
+                                       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(
+                                               ice_recv_pkts_vec_avx512));
                                }
 #endif
                        } else if (ad->rx_use_avx2) {
@@ -3159,20 +3176,21 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                                        PMD_DRV_LOG(NOTICE,
                                                    "Using AVX2 OFFLOAD Vector Rx (port %d).",
                                                    dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_pkts_vec_avx2_offload;
+                                       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(
+                                               ice_recv_pkts_vec_avx2_offload));
                                } else {
                                        PMD_DRV_LOG(NOTICE,
                                                    "Using AVX2 Vector Rx (port %d).",
                                                    dev->data->port_id);
-                                       dev->rx_pkt_burst =
-                                               ice_recv_pkts_vec_avx2;
+                                       rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(
+                                               ice_recv_pkts_vec_avx2));
                                }
                        } else {
                                PMD_DRV_LOG(DEBUG,
                                        "Using Vector Rx (port %d).",
                                        dev->data->port_id);
-                               dev->rx_pkt_burst = ice_recv_pkts_vec;
+                               rte_eth_set_rx_burst(dev->data->port_id,
+                                       _RTE_ETH_FUNC(ice_recv_pkts_vec));
                        }
                }
                return;
@@ -3185,43 +3203,85 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                PMD_INIT_LOG(DEBUG,
                             "Using a Scattered function on port %d.",
                             dev->data->port_id);
-               dev->rx_pkt_burst = ice_recv_scattered_pkts;
+               rte_eth_set_rx_burst(dev->data->port_id,
+                       _RTE_ETH_FUNC(ice_recv_scattered_pkts));
        } else if (ad->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG,
                             "Rx Burst Bulk Alloc Preconditions are "
                             "satisfied. Rx Burst Bulk Alloc function "
                             "will be used on port %d.",
                             dev->data->port_id);
-               dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
+               rte_eth_set_rx_burst(dev->data->port_id,
+                       _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc));
        } else {
                PMD_INIT_LOG(DEBUG,
                             "Rx Burst Bulk Alloc Preconditions are not "
                             "satisfied, Normal Rx will be used on port %d.",
                             dev->data->port_id);
-               dev->rx_pkt_burst = ice_recv_pkts;
+               rte_eth_set_rx_burst(dev->data->port_id,
+                       _RTE_ETH_FUNC(ice_recv_pkts));
        }
 }
 
 static const struct {
-       eth_rx_burst_t pkt_burst;
+       rte_eth_rx_burst_t pkt_burst;
        const char *info;
 } ice_rx_burst_infos[] = {
-       { ice_recv_scattered_pkts,          "Scalar Scattered" },
-       { ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
-       { ice_recv_pkts,                    "Scalar" },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts),
+               "Scalar Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc),
+               "Scalar Bulk Alloc",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts),
+               "Scalar",
+       },
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-       { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
-       { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
-       { ice_recv_pkts_vec_avx512,           "Vector AVX512" },
-       { ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512),
+               "Vector AVX512 Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512_offload),
+               "Offload Vector AVX512 Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512),
+               "Vector AVX512",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512_offload),
+               "Offload Vector AVX512",
+       },
 #endif
-       { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
-       { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
-       { ice_recv_pkts_vec_avx2,           "Vector AVX2" },
-       { ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
-       { ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
-       { ice_recv_pkts_vec,                "Vector SSE" },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2),
+               "Vector AVX2 Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2_offload),
+               "Offload Vector AVX2 Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2),
+               "Vector AVX2",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2_offload),
+               "Offload Vector AVX2",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec),
+               "Vector SSE Scattered",
+       },
+       {
+               _RTE_ETH_FUNC(ice_recv_pkts_vec),
+               "Vector SSE",
+       },
 #endif
 };
 
@@ -3229,7 +3289,7 @@ int
 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                      struct rte_eth_burst_mode *mode)
 {
-       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       rte_eth_rx_burst_t pkt_burst = rte_eth_get_rx_burst(dev->data->port_id);
        int ret = -EINVAL;
        unsigned int i;
 
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b10db0874d..be8d43a591 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -212,8 +212,7 @@ void ice_tx_queue_release(void *txq);
 void ice_free_queues(struct rte_eth_dev *dev);
 int ice_fdir_setup_tx_resources(struct ice_pf *pf);
 int ice_fdir_setup_rx_resources(struct ice_pf *pf);
-uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-                      uint16_t nb_pkts);
+_RTE_ETH_RX_PROTO(ice_recv_pkts);
 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
 void ice_set_rx_function(struct rte_eth_dev *dev);
@@ -242,37 +241,28 @@ int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_rxq_vec_setup(struct ice_rx_queue *rxq);
 int ice_txq_vec_setup(struct ice_tx_queue *txq);
-uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                          uint16_t nb_pkts);
-uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                                    uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(ice_recv_pkts_vec);
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec);
+
 uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts);
-uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-                               uint16_t nb_pkts);
-uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
-                                       uint16_t nb_pkts);
-uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
-                                         struct rte_mbuf **rx_pkts,
-                                         uint16_t nb_pkts);
-uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
-                                                 struct rte_mbuf **rx_pkts,
-                                                 uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2);
+_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2_offload);
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2);
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2_offload);
+
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
                                        uint16_t nb_pkts);
-uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
-                                 uint16_t nb_pkts);
-uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
-                                         struct rte_mbuf **rx_pkts,
-                                         uint16_t nb_pkts);
-uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
-                                           struct rte_mbuf **rx_pkts,
-                                           uint16_t nb_pkts);
-uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
-                                                   struct rte_mbuf **rx_pkts,
-                                                   uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512);
+_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512_offload);
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512);
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512_offload);
+
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 9725ac0180..29b9b57f9f 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -704,7 +704,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts)
 {
@@ -712,7 +712,9 @@ ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                           nb_pkts, NULL, false);
 }
 
-uint16_t
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx2)
+
+static inline uint16_t
 ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts)
 {
@@ -720,6 +722,8 @@ ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                                           nb_pkts, NULL, true);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx2_offload)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -787,7 +791,7 @@ ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue,
                                rx_pkts + retval, nb_pkts, offload);
 }
 
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                 struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts)
@@ -798,7 +802,9 @@ ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                                       false);
 }
 
-uint16_t
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx2)
+
+static inline uint16_t
 ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
                                         struct rte_mbuf **rx_pkts,
                                         uint16_t nb_pkts)
@@ -809,6 +815,8 @@ ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
                                                       true);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx2_offload)
+
 static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
         struct rte_mbuf *pkt, uint64_t flags, bool offload)
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5bba9887d2..30c44c8918 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -819,18 +819,20 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
        return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx512)
+
 /**
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts)
 {
@@ -838,6 +840,8 @@ ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
                                             nb_pkts, NULL, true);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx512_offload)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -927,7 +931,7 @@ ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                   uint16_t nb_pkts)
 {
@@ -945,13 +949,15 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx512)
+
 /**
  * vPMD receive routine that reassembles scattered packets.
  * Main receive routine that can handle arbitrary burst sizes
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts)
@@ -971,6 +977,8 @@ ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
                                rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx512_offload)
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 673e44a243..2caf1c6941 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -587,13 +587,15 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
  *   numbers of DD bits
  */
-uint16_t
+static inline uint16_t
 ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                  uint16_t nb_pkts)
 {
        return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  *
@@ -639,7 +641,7 @@ ice_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 /**
  * vPMD receive routine that reassembles scattered packets.
  */
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts)
 {
@@ -662,6 +664,8 @@ ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                                                     nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec)
+
 static inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
         uint64_t flags)
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 40e474aa7e..8b7d1e8840 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1513,6 +1513,126 @@ struct rte_eth_tunnel_filter_conf {
        uint16_t queue_id;      /**< Queue assigned to if match. */
 };
 
+/**
+ * @internal
+ * Helper routine for eth driver rx_burst API.
+ * Should be called first, on entry to the PMD's rte_eth_rx_burst
+ * implementation.
+ * Does the necessary checks and returns a pointer to the device RX queue.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue from which to retrieve input packets.
+ *
+ * @return
+ *  Pointer to device RX queue structure on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_DEBUG_RX
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+
+       if (queue_id >= dev->data->nb_rx_queues) {
+               RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+               return NULL;
+       }
+#endif
+       return dev->data->rx_queues[queue_id];
+}
+
+/**
+ * @internal
+ * Helper routine for eth driver rx_burst API.
+ * Should be called on exit from the PMD's rte_eth_rx_burst implementation.
+ * Does the necessary post-processing: invokes RX callbacks (if any), tracing, etc.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue from which to retrieve input packets.
+ * @param rx_pkts
+ *   The address of an array of pointers to *rte_mbuf* structures that
+ *   have been retrieved from the device.
+ * @param nb_rx
+ *   The number of packets that were retrieved from the device.
+ * @param nb_pkts
+ *   The number of elements in *rx_pkts* array.
+ *
+ * @return
+ *  The number of packets effectively supplied to the *rx_pkts* array.
+ */
+__rte_internal
+static inline uint16_t
+_rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
+       struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev;
+
+       dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+       struct rte_eth_rxtx_callback *cb;
+
+       /* __ATOMIC_RELEASE memory order was used when the
+        * call back was inserted into the list.
+        * Since there is a clear dependency between loading
+        * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+        * not required.
+        */
+       cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
+                               __ATOMIC_RELAXED);
+
+       if (unlikely(cb != NULL)) {
+               do {
+                       nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+                               nb_pkts, cb->param);
+                       cb = cb->next;
+               } while (cb != NULL);
+       }
+#endif
+
+       rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
+       return nb_rx;
+}
+
+#define _RTE_ETH_FUNC(fn)      _rte_eth_##fn
+
+/**
+ * @internal
+ * Helper macro to declare a new-API wrapper for an existing PMD rx_burst function.
+ */
+#define _RTE_ETH_RX_PROTO(fn) \
+       uint16_t _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+                       struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+
+/**
+ * @internal
+ * Helper macro to define a new-API wrapper around an existing PMD rx_burst function.
+ */
+#define _RTE_ETH_RX_DEF(fn) \
+_RTE_ETH_RX_PROTO(fn) \
+{ \
+       uint16_t nb_rx; \
+       void *rxq = _rte_eth_rx_prolog(port_id, queue_id); \
+       if (rxq == NULL) \
+               return 0; \
+       nb_rx = fn(rxq, rx_pkts, nb_pkts); \
+       return _rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx, nb_pkts); \
+}
+
+__rte_experimental
+rte_eth_rx_burst_t rte_eth_get_rx_burst(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 949292a617..c126626281 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
-       eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
@@ -6337,3 +6336,25 @@ RTE_INIT(ethdev_init_telemetry)
                        eth_dev_handle_port_link_status,
                        "Returns the link status for a port. Parameters: int port_id");
 }
+
+__rte_experimental
+rte_eth_rx_burst_t
+rte_eth_get_rx_burst(uint16_t port_id)
+{
+       if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
+       return rte_eth_burst_api[port_id].rx_pkt_burst;
+}
+
+__rte_experimental
+int
+rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf)
+{
+       if (port_id >= RTE_DIM(rte_eth_burst_api))
+               return -EINVAL;
+
+       rte_eth_burst_api[port_id].rx_pkt_burst = rxf;
+       return 0;
+}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index d2b27c351f..a155f255ad 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -4981,44 +4981,11 @@ static inline uint16_t
 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
                 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
 {
-       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-       uint16_t nb_rx;
-
-#ifdef RTE_ETHDEV_DEBUG_RX
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
-
-       if (queue_id >= dev->data->nb_rx_queues) {
-               RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+       if (port_id >= RTE_MAX_ETHPORTS)
                return 0;
-       }
-#endif
-       nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
-                                    rx_pkts, nb_pkts);
-
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
-       struct rte_eth_rxtx_callback *cb;
-
-       /* __ATOMIC_RELEASE memory order was used when the
-        * call back was inserted into the list.
-        * Since there is a clear dependency between loading
-        * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
-        * not required.
-        */
-       cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
-                               __ATOMIC_RELAXED);
-
-       if (unlikely(cb != NULL)) {
-               do {
-                       nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
-                                               nb_pkts, cb->param);
-                       cb = cb->next;
-               } while (cb != NULL);
-       }
-#endif
 
-       rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
-       return nb_rx;
+       return rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id,
+                       rx_pkts, nb_pkts);
 }
 
 /**
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index fb8526cb9f..94ffa071e3 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -25,12 +25,14 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
 
 struct rte_eth_dev;
 
+/* TODO: legacy prototype, should be removed in a later patch. */
+typedef uint16_t (*eth_rx_burst_t)(void *rxq,
+                               struct rte_mbuf **rx_pkts,
+                               uint16_t nb_pkts);
+
 typedef uint16_t (*rte_eth_rx_burst_t)(uint16_t port_id, uint16_t queue_id,
                        struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
-typedef uint16_t (*eth_rx_burst_t)(void *rxq,
-                                  struct rte_mbuf **rx_pkts,
-                                  uint16_t nb_pkts);
 /**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
 
 typedef uint16_t (*rte_eth_tx_burst_t)(uint16_t port_id, uint16_t queue_id,
@@ -113,7 +115,6 @@ struct rte_eth_rxtx_callback {
  * process, while the actual configuration data for the device is shared.
  */
 struct rte_eth_dev {
-       eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
        eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
        eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
 
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 3eece75b72..2698c75940 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -249,6 +249,11 @@ EXPERIMENTAL {
        rte_mtr_meter_policy_delete;
        rte_mtr_meter_policy_update;
        rte_mtr_meter_policy_validate;
+
+       # added in 21.11
+       rte_eth_burst_api;
+       rte_eth_get_rx_burst;
+       rte_eth_set_rx_burst;
 };
 
 INTERNAL {
-- 
2.26.3
