Signed-off-by: Jianfeng Tan <jianfeng.tan at intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 50 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_rxtx.c   |  5 +++-
 3 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4c4c6df..de5c3a9 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -166,6 +166,8 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_ptype_info_get(struct rte_eth_dev *dev,
+               uint32_t ptype_mask, uint32_t ptypes[]);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -428,6 +430,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_ptype_info_get   = ixgbe_dev_ptype_info_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -512,6 +515,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
        .dev_infos_get        = ixgbevf_dev_info_get,
+       .dev_ptype_info_get   = ixgbe_dev_ptype_info_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -2829,6 +2833,52 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }

+static int
+ixgbe_dev_ptype_info_get(struct rte_eth_dev *dev, uint32_t ptype_mask,
+               uint32_t ptypes[])
+{
+       int num = 0;
+
+       if ((dev->rx_pkt_burst == ixgbe_recv_pkts)
+                       || (dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc)
+                       || (dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc)
+                       || (dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+          ) {
+               /* refers to ixgbe_rxd_pkt_info_to_pkt_type() */
+               if ((ptype_mask & RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_MASK)
+                       ptypes[num++] = RTE_PTYPE_L2_ETHER;
+
+               if ((ptype_mask & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_MASK) {
+                       ptypes[num++] = RTE_PTYPE_L3_IPV4;
+                       ptypes[num++] = RTE_PTYPE_L3_IPV4_EXT;
+                       ptypes[num++] = RTE_PTYPE_L3_IPV6;
+                       ptypes[num++] = RTE_PTYPE_L3_IPV6_EXT;
+               }
+
+               if ((ptype_mask & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_MASK) {
+                       ptypes[num++] = RTE_PTYPE_L4_SCTP;
+                       ptypes[num++] = RTE_PTYPE_L4_TCP;
+                       ptypes[num++] = RTE_PTYPE_L4_UDP;
+               }
+
+               if ((ptype_mask & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_MASK)
+                       ptypes[num++] = RTE_PTYPE_TUNNEL_IP;
+
+               if ((ptype_mask & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_MASK) {
+                       ptypes[num++] = RTE_PTYPE_INNER_L3_IPV6;
+                       ptypes[num++] = RTE_PTYPE_INNER_L3_IPV6_EXT;
+               }
+
+               if ((ptype_mask & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_MASK) {
+                       ptypes[num++] = RTE_PTYPE_INNER_L4_TCP;
+                       ptypes[num++] = RTE_PTYPE_INNER_L4_UDP;
+               }
+       } else
+               num = -ENOTSUP;
+
+       return num;
+}
+
 static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index d26771a..2479830 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -379,6 +379,8 @@ void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
 uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

+uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+                          uint16_t nb_pkts);
 uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 52a263c..d324099 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -899,6 +899,9 @@ end_of_tx:
 #define IXGBE_PACKET_TYPE_MAX               0X80
 #define IXGBE_PACKET_TYPE_MASK              0X7F
 #define IXGBE_PACKET_TYPE_SHIFT             0X04
+/*
+ * @note: fix ixgbe_dev_ptype_info_get() if any change here.
+ */
 static inline uint32_t
 ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
 {
@@ -1247,7 +1250,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }

 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-static uint16_t
+uint16_t
 ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts)
 {
-- 
2.1.4

Reply via email to