Re: [lng-odp] [RFCv4] dpdk: enable hardware checksum support

2017-05-24 Thread Elo, Matias (Nokia - FI/Espoo)

> On 24 May 2017, at 11:32, Bogdan Pricope  wrote:
> 
> Hi Matias,
> 
> Using the ptypes reported by dpdk in the parser was intended for another
> patch (the next work after csum).
> 

Good, so we are on the same page. When implementing packet parsing you will have
to move/reimplement this checksum code anyway, so it makes more sense to
implement both in the same patch / patch set.

> I guess your test is a degradation test (due to the new ifs) and you did
> not enable csum offloads / set flags on packets.

Yep, just the standard l2fwd test without any offload flags.

> 
> What would be interesting to see:
> - in a generation or termination test (UDP), what the degradation/gain
>   is with csum offload enabled
> - how the degradation/gain changes with bigger packets (256 bytes vs 64 bytes)

That would definitely be more interesting. I quickly tried enabling the
'ipv4_chksum' and 'udp_chksum' flags in odp_l2fwd and the performance
degradation was minimal (~0.2%).
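Roughly what that change looks like (a minimal sketch, not the exact odp_l2fwd
diff; the helper name is made up and the pktio is assumed to be opened but not
yet started):

#include <odp_api.h>

static int enable_rx_csum(odp_pktio_t pktio)
{
	odp_pktio_capability_t capa;
	odp_pktio_config_t config;

	if (odp_pktio_capability(pktio, &capa))
		return -1;

	odp_pktio_config_init(&config);

	/* Validate IPv4/UDP checksums on input if the NIC supports it */
	if (capa.config.pktin.bit.ipv4_chksum)
		config.pktin.bit.ipv4_chksum = 1;
	if (capa.config.pktin.bit.udp_chksum)
		config.pktin.bit.udp_chksum = 1;

	/* Must be applied before odp_pktio_start() */
	return odp_pktio_config(pktio, &config);
}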

While testing this I noticed a small problem in the code:

> +	ptype_cnt = rte_eth_dev_get_supported_ptypes(pkt_dpdk->port_id,
> +						     ptype_mask, ptypes, ptype_cnt);
> +	for (i = 0; i < ptype_cnt; i++)
> +		switch (ptypes[i]) {
> +		case RTE_PTYPE_L3_IPV4:
> +			ptype_l3_ipv4 = 1;
> +			break;
> +		case RTE_PTYPE_L4_TCP:
> +			ptype_l4_tcp = 1;
> +			break;
> +		case RTE_PTYPE_L4_UDP:
> +			ptype_l4_udp = 1;
> +			break;
> +		}
> +	}

This doesn't work in all cases on its own. For example, the Fortville NIC
reports RTE_PTYPE_L3_IPV4_EXT_UNKNOWN but not RTE_PTYPE_L3_IPV4.
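Something along these lines should cover that case as well (just a sketch, the
helper name is made up):

#include <stdint.h>
#include <rte_mbuf.h>

/* Accept any IPv4 L3 ptype variant, e.g. RTE_PTYPE_L3_IPV4_EXT_UNKNOWN */
static inline int ptype_is_ipv4(uint32_t ptype)
{
	switch (ptype & RTE_PTYPE_L3_MASK) {
	case RTE_PTYPE_L3_IPV4:
	case RTE_PTYPE_L3_IPV4_EXT:
	case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
		return 1;
	default:
		return 0;
	}
}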


-Matias



Re: [lng-odp] [RFCv4] dpdk: enable hardware checksum support

2017-05-24 Thread Bogdan Pricope
Hi Matias,

Using the ptypes reported by dpdk in the parser was intended for another
patch (the next work after csum).

I guess your test is a degradation test (due to the new ifs) and you did
not enable csum offloads / set flags on packets.

What would be interesting to see:
 - in a generation or termination test (UDP), what the degradation/gain
   is with csum offload enabled
 - how the degradation/gain changes with bigger packets (256 bytes vs 64 bytes)

BR,
Bogdan


On 24 May 2017 at 10:53, Elo, Matias (Nokia - FI/Espoo) wrote:
> Hi Bogdan,
>
> I ran a quick test with the patch and the overhead seems to be surprisingly
> small, at least on a Xeon CPU (E5-2697v3). However, I would still suggest
> making some changes to the code. More below.
>
> -Matias
>
>
>
>>
>> @@ -605,9 +663,11 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
>>int nb_pkts = 0;
>>int alloc_len, num;
>>odp_pool_t pool = pktio_entry->s.pkt_dpdk.pool;
>> + odp_pktin_config_opt_t *pktin_cfg;
>>
>>/* Allocate maximum sized packets */
>>alloc_len = pktio_entry->s.pkt_dpdk.data_room;
>> + pktin_cfg = &pktio_entry->s.config.pktin;
>>
>>num = packet_alloc_multi(pool, alloc_len, pkt_table, mbuf_num);
>>if (num != mbuf_num) {
>> @@ -658,6 +718,34 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
>>if (mbuf->ol_flags & PKT_RX_RSS_HASH)
>>odp_packet_flow_hash_set(pkt, mbuf->hash.rss);
>>
>> +		if ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
>> +		    pktin_cfg->bit.ipv4_chksum &&
>> +		    mbuf->ol_flags & PKT_RX_IP_CKSUM_BAD) {
>> +			if (pktin_cfg->bit.drop_ipv4_err) {
>> +				odp_packet_free(pkt);
>> +				continue;
>> +			} else
>> +				pkt_hdr->p.error_flags.ip_err = 1;
>> +		}
>> +
>> +		if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP &&
>> +		    pktin_cfg->bit.udp_chksum &&
>> +		    mbuf->ol_flags & PKT_RX_L4_CKSUM_BAD) {
>> +			if (pktin_cfg->bit.drop_udp_err) {
>> +				odp_packet_free(pkt);
>> +				continue;
>> +			} else
>> +				pkt_hdr->p.error_flags.udp_err = 1;
>> +		} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP &&
>> +			   pktin_cfg->bit.tcp_chksum &&
>> +			   mbuf->ol_flags & PKT_RX_L4_CKSUM_BAD) {
>> +			if (pktin_cfg->bit.drop_tcp_err) {
>> +				odp_packet_free(pkt);
>> +				continue;
>> +			} else
>> +				pkt_hdr->p.error_flags.tcp_err = 1;
>> +		}
>> +
>
> Instead of doing packet parsing and checksum validation separately, I would
> do both in one function. The api-next pktio code (which should be merged to
> master) has a new configuration option 'odp_pktio_config_t.parser.layer',
> which selects the parsing level. The packet_parse_layer() function is then
> used to parse the received packet up to the selected level.
>
> So, instead of calling packet_parse_layer() in the dpdk pktio, I would add a
> new dpdk-specific implementation of this function. This way we can exploit
> all the dpdk packet parsing features in addition to the checksum
> calculations. Also, by doing this you can remove most of the if() calls
> above. Enabling checksum calculation for a higher protocol layer than the
> selected parsing level would be a user error (e.g. ODP_PKTIO_PARSER_LAYER_L2
> and TCP checksum enabled).
>
>


Re: [lng-odp] [RFCv4] dpdk: enable hardware checksum support

2017-05-24 Thread Elo, Matias (Nokia - FI/Espoo)
Hi Bogdan,

I ran a quick test with the patch and the overhead seems to be surprisingly
small, at least on a Xeon CPU (E5-2697v3). However, I would still suggest
making some changes to the code. More below.

-Matias



>
> @@ -605,9 +663,11 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
>int nb_pkts = 0;
>int alloc_len, num;
>odp_pool_t pool = pktio_entry->s.pkt_dpdk.pool;
> + odp_pktin_config_opt_t *pktin_cfg;
>
>/* Allocate maximum sized packets */
>alloc_len = pktio_entry->s.pkt_dpdk.data_room;
> + pktin_cfg = &pktio_entry->s.config.pktin;
>
>num = packet_alloc_multi(pool, alloc_len, pkt_table, mbuf_num);
>if (num != mbuf_num) {
> @@ -658,6 +718,34 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
>if (mbuf->ol_flags & PKT_RX_RSS_HASH)
>odp_packet_flow_hash_set(pkt, mbuf->hash.rss);
>
> +		if ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
> +		    pktin_cfg->bit.ipv4_chksum &&
> +		    mbuf->ol_flags & PKT_RX_IP_CKSUM_BAD) {
> +			if (pktin_cfg->bit.drop_ipv4_err) {
> +				odp_packet_free(pkt);
> +				continue;
> +			} else
> +				pkt_hdr->p.error_flags.ip_err = 1;
> +		}
> +
> +		if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP &&
> +		    pktin_cfg->bit.udp_chksum &&
> +		    mbuf->ol_flags & PKT_RX_L4_CKSUM_BAD) {
> +			if (pktin_cfg->bit.drop_udp_err) {
> +				odp_packet_free(pkt);
> +				continue;
> +			} else
> +				pkt_hdr->p.error_flags.udp_err = 1;
> +		} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP &&
> +			   pktin_cfg->bit.tcp_chksum &&
> +			   mbuf->ol_flags & PKT_RX_L4_CKSUM_BAD) {
> +			if (pktin_cfg->bit.drop_tcp_err) {
> +				odp_packet_free(pkt);
> +				continue;
> +			} else
> +				pkt_hdr->p.error_flags.tcp_err = 1;
> +		}
> +

Instead of doing packet parsing and checksum validation separately, I would do
both in one function. The api-next pktio code (which should be merged to
master) has a new configuration option 'odp_pktio_config_t.parser.layer', which
selects the parsing level. The packet_parse_layer() function is then used to
parse the received packet up to the selected level.

So, instead of calling packet_parse_layer() in the dpdk pktio, I would add a
new dpdk-specific implementation of this function. This way we can exploit all
the dpdk packet parsing features in addition to the checksum calculations.
Also, by doing this you can remove most of the if() calls above. Enabling
checksum calculation for a higher protocol layer than the selected parsing
level would be a user error (e.g. ODP_PKTIO_PARSER_LAYER_L2 and TCP checksum
enabled).
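On the application side that combination would look roughly like the following
(a sketch assuming the api-next names; the function itself is hypothetical):

#include <odp_api.h>

/* Parse up to L4 and validate UDP/TCP checksums, so the checksum
 * configuration never exceeds the selected parsing level. */
static int config_rx_parse_and_csum(odp_pktio_t pktio)
{
	odp_pktio_config_t config;

	odp_pktio_config_init(&config);
	config.parser.layer = ODP_PKTIO_PARSER_LAYER_L4;
	config.pktin.bit.udp_chksum = 1;
	config.pktin.bit.tcp_chksum = 1;

	/* Apply before odp_pktio_start() */
	return odp_pktio_config(pktio, &config);
}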




rfc-patch-bech.xlsx


[lng-odp] [RFCv4] dpdk: enable hardware checksum support

2017-05-22 Thread Bogdan Pricope
Signed-off-by: Bogdan Pricope 
---
 example/generator/odp_generator.c  | 102 ++---
 platform/linux-generic/odp_packet_io.c |   2 +
 platform/linux-generic/pktio/dpdk.c| 161 -
 3 files changed, 246 insertions(+), 19 deletions(-)

diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index 79efe5b..d68a31f 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -46,6 +46,7 @@
 
 typedef struct {
odp_pktio_t pktio;
+   odp_pktio_config_t config;
odp_pktout_queue_t pktout[MAX_WORKERS];
unsigned pktout_count;
 } interface_t;
@@ -92,6 +93,7 @@ static struct {
  */
 typedef struct {
odp_pktout_queue_t pktout; /**< Packet output queue to use*/
+   odp_pktout_config_opt_t *pktout_cfg; /**< Packet output offload config*/
odp_pool_t pool;/**< Pool for packet IO */
odp_timer_pool_t tp;/**< Timer pool handle */
odp_queue_t tq; /**< Queue for timeouts */
@@ -116,6 +118,9 @@ static args_t *args;
 /** Barrier to sync threads execution */
 static odp_barrier_t barrier;
 
+/** List of interfaces */
+static interface_t *ifs;
+
 /* helper funcs */
 static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
 static void print_info(char *progname, appl_args_t *appl_args);
@@ -194,7 +199,8 @@ static int scan_ip(char *buf, unsigned int *paddr)
  * @return Handle of created packet
  * @retval ODP_PACKET_INVALID  Packet could not be created
  */
-static odp_packet_t setup_udp_pkt_ref(odp_pool_t pool)
+static odp_packet_t setup_udp_pkt_ref(odp_pool_t pool,
+				      odp_pktout_config_opt_t *pktout_cfg)
 {
odp_packet_t pkt;
char *buf;
@@ -239,7 +245,8 @@ static odp_packet_t setup_udp_pkt_ref(odp_pool_t pool)
udp->dst_port = odp_cpu_to_be_16(args->appl.dstport);
udp->length = odp_cpu_to_be_16(args->appl.payload + ODPH_UDPHDR_LEN);
udp->chksum = 0;
-   udp->chksum = odph_ipv4_udp_chksum(pkt);
+   if (!pktout_cfg->bit.udp_chksum)
+   udp->chksum = odph_ipv4_udp_chksum(pkt);
 
return pkt;
 }
@@ -253,7 +260,8 @@ static odp_packet_t setup_udp_pkt_ref(odp_pool_t pool)
  * @return Handle of created packet
  * @retval ODP_PACKET_INVALID  Packet could not be created
  */
-static odp_packet_t pack_udp_pkt(odp_pool_t pool, odp_packet_t pkt_ref)
+static odp_packet_t pack_udp_pkt(odp_pool_t pool, odp_packet_t pkt_ref,
+				 odp_pktout_config_opt_t *pktout_cfg)
 {
odp_packet_t pkt;
char *buf;
@@ -275,7 +283,17 @@ static odp_packet_t pack_udp_pkt(odp_pool_t pool, odp_packet_t pkt_ref)
ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);
seq = odp_atomic_fetch_add_u64(&counters.seq, 1) % 0x;
ip->id = odp_cpu_to_be_16(seq);
-   ip->chksum = odph_chksum(ip, ODPH_IPV4HDR_LEN);
+   ip->chksum = 0;
+   if (!pktout_cfg->bit.ipv4_chksum)
+   ip->chksum = odph_chksum(ip, ODPH_IPV4HDR_LEN);
+
+   if (pktout_cfg->bit.ipv4_chksum || pktout_cfg->bit.udp_chksum) {
+   odp_packet_l2_offset_set(pkt, 0);
+   odp_packet_l3_offset_set(pkt, ODPH_ETHHDR_LEN);
+   odp_packet_has_ipv4_set(pkt, 1);
+   odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+   odp_packet_has_udp_set(pkt, 1);
+   }
 
return pkt;
 }
@@ -344,7 +362,8 @@ static odp_packet_t setup_icmp_pkt_ref(odp_pool_t pool)
  * @return Handle of created packet
  * @retval ODP_PACKET_INVALID  Packet could not be created
  */
-static odp_packet_t pack_icmp_pkt(odp_pool_t pool, odp_packet_t pkt_ref)
+static odp_packet_t pack_icmp_pkt(odp_pool_t pool, odp_packet_t pkt_ref,
+				  odp_pktout_config_opt_t *pktout_cfg)
 {
odp_packet_t pkt;
char *buf;
@@ -369,7 +388,9 @@ static odp_packet_t pack_icmp_pkt(odp_pool_t pool, odp_packet_t pkt_ref)
ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);
seq = odp_atomic_fetch_add_u64(&counters.seq, 1) % 0x;
ip->id = odp_cpu_to_be_16(seq);
-   ip->chksum = odph_chksum(ip, ODPH_IPV4HDR_LEN);
+   ip->chksum = 0;
+   if (!pktout_cfg->bit.ipv4_chksum)
+   ip->chksum = odph_chksum(ip, ODPH_IPV4HDR_LEN);
 
/* icmp */
icmp = (odph_icmphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
@@ -383,6 +404,13 @@ static odp_packet_t pack_icmp_pkt(odp_pool_t pool, odp_packet_t pkt_ref)
icmp->chksum = 0;
icmp->chksum = odph_chksum(icmp, args->appl.payload + ODPH_ICMPHDR_LEN);
 
+   if (pktout_cfg->bit.ipv4_chksum) {
+   odp_packet_l2_offset_set(pkt, 0);
+   odp_packet_l3_offset_set(pkt, ODPH_ETHHDR_LEN);
+   odp_packet_has_ipv4_set(pkt, 1);
+