To unify packet type reporting among all PMDs, the packet type bit masks in 'ol_flags' are replaced by the unified 'packet_type' field in the mbuf. To avoid breaking ABI compatibility, all of these changes are guarded by RTE_UNIFIED_PKT_TYPE, which is disabled by default.
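
For illustration only (this helper is not part of the patch, and the header locations are assumptions): the build option switches applications between the old ol_flags test and the new packet_type test roughly as follows.

/*
 * Illustrative sketch only: contrast the old ol_flags bit test with the
 * unified packet_type test that RTE_UNIFIED_PKT_TYPE enables.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>

static inline int
pkt_is_ipv4(const struct rte_mbuf *m)
{
#ifdef RTE_UNIFIED_PKT_TYPE
	/* New path: the L3 type is carried in the unified packet_type field. */
	return RTE_ETH_IS_IPV4_HDR(m->packet_type);
#else
	/* Old path: the L3 type is signalled by a PKT_RX_* bit in ol_flags. */
	return (m->ol_flags & PKT_RX_IPV4_HDR) != 0;
#endif
}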
Signed-off-by: Helin Zhang <helin.zhang@intel.com>
---
 examples/l3fwd/main.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 120 insertions(+), 3 deletions(-)

v2 changes:
* Used redefined packet types and enlarged packet_type field in mbuf.

v3 changes:
* Minor bug fixes and enhancements.

v5 changes:
* Re-worded the commit logs.

v6 changes:
* Disabled the code changes for unified packet type by default, to avoid
  breaking ABI compatibility.

diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index e32512e..72d9ab7 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -955,7 +955,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
 
+#ifdef RTE_UNIFIED_PKT_TYPE
+	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+#else
 	if (m->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
 		/* Handle IPv4 headers.*/
 		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m,
 				unsigned char *) + sizeof(struct ether_hdr));
@@ -989,8 +993,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
 
 		send_single_packet(m, dst_port);
-
+#ifdef RTE_UNIFIED_PKT_TYPE
+	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+#else
 	} else {
+#endif
 		/* Handle IPv6 headers.*/
 		struct ipv6_hdr *ipv6_hdr;
 
@@ -1011,8 +1018,13 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
 
 		send_single_packet(m, dst_port);
+#ifdef RTE_UNIFIED_PKT_TYPE
+	} else
+		/* Free the mbuf that contains non-IPV4/IPV6 packet */
+		rte_pktmbuf_free(m);
+#else
 	}
-
+#endif
 }
 
 #ifdef DO_RFC_1812_CHECKS
@@ -1036,12 +1048,19 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
  * to BAD_PORT value.
  */
 static inline __attribute__((always_inline)) void
+#ifdef RTE_UNIFIED_PKT_TYPE
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+#else
 rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
+#endif
 {
 	uint8_t ihl;
 
+#ifdef RTE_UNIFIED_PKT_TYPE
+	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
+#else
 	if ((flags & PKT_RX_IPV4_HDR) != 0) {
-
+#endif
 		ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
 
 		ipv4_hdr->time_to_live--;
@@ -1071,11 +1090,19 @@ get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	struct ipv6_hdr *ipv6_hdr;
 	struct ether_hdr *eth_hdr;
 
+#ifdef RTE_UNIFIED_PKT_TYPE
+	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+#else
 	if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
 		if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
 				&next_hop) != 0)
 			next_hop = portid;
+#ifdef RTE_UNIFIED_PKT_TYPE
+	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+#else
 	} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
+#endif
 		eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
 		ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
 		if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
@@ -1109,12 +1136,52 @@ process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
 
 	ve = val_eth[dp];
 	dst_port[0] = dp;
+#ifdef RTE_UNIFIED_PKT_TYPE
+	rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
+#else
 	rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);
+#endif
 
 	te = _mm_blend_epi16(te, ve, MASK_ETH);
 	_mm_store_si128((__m128i *)eth_hdr, te);
 }
 
+#ifdef RTE_UNIFIED_PKT_TYPE
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+		__m128i *dip,
+		uint32_t *ipv4_flag)
+{
+	struct ipv4_hdr *ipv4_hdr;
+	struct ether_hdr *eth_hdr;
+	uint32_t x0, x1, x2, x3;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x0 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x1 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[1]->packet_type;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x2 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[2]->packet_type;
+
+	eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	x3 = ipv4_hdr->dst_addr;
+	ipv4_flag[0] &= pkt[3]->packet_type;
+
+	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
+}
+#else /* RTE_UNIFIED_PKT_TYPE */
 /*
  * Read ol_flags and destination IPV4 addresses from 4 mbufs.
  */
@@ -1147,14 +1214,24 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP], __m128i *dip, uint32_t *flag)
 
 	dip[0] = _mm_set_epi32(x3, x2, x1, x0);
 }
+#endif /* RTE_UNIFIED_PKT_TYPE */
 
 /*
  * Lookup into LPM for destination port.
  * If lookup fails, use incoming port (portid) as destination port.
 */
 static inline void
+#ifdef RTE_UNIFIED_PKT_TYPE
+processx4_step2(const struct lcore_conf *qconf,
+		__m128i dip,
+		uint32_t ipv4_flag,
+		uint8_t portid,
+		struct rte_mbuf *pkt[FWDSTEP],
+		uint16_t dprt[FWDSTEP])
+#else
 processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
 	uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP])
+#endif /* RTE_UNIFIED_PKT_TYPE */
 {
 	rte_xmm_t dst;
 	const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -1164,7 +1241,11 @@ processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
 	dip = _mm_shuffle_epi8(dip, bswap_mask);
 
 	/* if all 4 packets are IPV4. */
+#ifdef RTE_UNIFIED_PKT_TYPE
+	if (likely(ipv4_flag)) {
+#else
 	if (likely(flag != 0)) {
+#endif
 		rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
 	} else {
 		dst.x = dip;
@@ -1214,6 +1295,16 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
 	_mm_store_si128(p[2], te[2]);
 	_mm_store_si128(p[3], te[3]);
 
+#ifdef RTE_UNIFIED_PKT_TYPE
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+		&dst_port[0], pkt[0]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+		&dst_port[1], pkt[1]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+		&dst_port[2], pkt[2]->packet_type);
+	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+		&dst_port[3], pkt[3]->packet_type);
+#else /* RTE_UNIFIED_PKT_TYPE */
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
 		&dst_port[0], pkt[0]->ol_flags);
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
@@ -1222,6 +1313,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
 		&dst_port[2], pkt[2]->ol_flags);
 	rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
 		&dst_port[3], pkt[3]->ol_flags);
+#endif /* RTE_UNIFIED_PKT_TYPE */
 }
 
 /*
@@ -1408,7 +1500,11 @@ main_loop(__attribute__((unused)) void *dummy)
 	uint16_t *lp;
 	uint16_t dst_port[MAX_PKT_BURST];
 	__m128i dip[MAX_PKT_BURST / FWDSTEP];
+#ifdef RTE_UNIFIED_PKT_TYPE
+	uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+#else
 	uint32_t flag[MAX_PKT_BURST / FWDSTEP];
+#endif
 	uint16_t pnum[MAX_PKT_BURST + 1];
 #endif
 
@@ -1478,6 +1574,18 @@ main_loop(__attribute__((unused)) void *dummy)
 			 */
 			int32_t n = RTE_ALIGN_FLOOR(nb_rx, 4);
 			for (j = 0; j < n ; j+=4) {
+#ifdef RTE_UNIFIED_PKT_TYPE
+				uint32_t pkt_type =
+					pkts_burst[j]->packet_type &
+					pkts_burst[j+1]->packet_type &
+					pkts_burst[j+2]->packet_type &
+					pkts_burst[j+3]->packet_type;
+				if (pkt_type & RTE_PTYPE_L3_IPV4) {
+					simple_ipv4_fwd_4pkts(
+					&pkts_burst[j], portid, qconf);
+				} else if (pkt_type &
+					RTE_PTYPE_L3_IPV6) {
+#else /* RTE_UNIFIED_PKT_TYPE */
 				uint32_t ol_flag = pkts_burst[j]->ol_flags
 						& pkts_burst[j+1]->ol_flags
 						& pkts_burst[j+2]->ol_flags
@@ -1486,6 +1594,7 @@ main_loop(__attribute__((unused)) void *dummy)
 					simple_ipv4_fwd_4pkts(&pkts_burst[j],
 								portid, qconf);
 				} else if (ol_flag & PKT_RX_IPV6_HDR) {
+#endif /* RTE_UNIFIED_PKT_TYPE */
 					simple_ipv6_fwd_4pkts(&pkts_burst[j],
 								portid, qconf);
 				} else {
@@ -1510,13 +1619,21 @@ main_loop(__attribute__((unused)) void *dummy)
 			for (j = 0; j != k; j += FWDSTEP) {
 				processx4_step1(&pkts_burst[j],
 						&dip[j / FWDSTEP],
+#ifdef RTE_UNIFIED_PKT_TYPE
+						&ipv4_flag[j / FWDSTEP]);
+#else
 						&flag[j / FWDSTEP]);
+#endif
 			}
 
 			k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
 			for (j = 0; j != k; j += FWDSTEP) {
 				processx4_step2(qconf, dip[j / FWDSTEP],
+#ifdef RTE_UNIFIED_PKT_TYPE
+						ipv4_flag[j / FWDSTEP], portid,
+#else
						flag[j / FWDSTEP], portid,
+#endif
 						&pkts_burst[j], &dst_port[j]);
 			}
 
-- 
1.9.3
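
Note on the bulk path in main_loop() above: AND-ing the packet_type of four consecutive mbufs means an L3 type bit survives only if every packet in the group carries it, which is what lets the loop take the 4-packet IPv4 fast path safely. A minimal sketch of that idea, with an illustrative helper name that does not exist in the patch:

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * Illustrative only: non-zero when all four mbufs carry an IPv4 L3 header,
 * mirroring the pkt_type accumulation used in main_loop().
 */
static inline uint32_t
all4_are_ipv4(struct rte_mbuf *pkts[4])
{
	uint32_t pkt_type = pkts[0]->packet_type &
			    pkts[1]->packet_type &
			    pkts[2]->packet_type &
			    pkts[3]->packet_type;

	/* RTE_PTYPE_L3_IPV4 remains set only if every packet sets it. */
	return pkt_type & RTE_PTYPE_L3_IPV4;
}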