On 1/17/2018 6:56 AM, Shahaf Shuler wrote:
> Tuesday, January 16, 2018 1:53 PM, Rafal Kozik:
>> Subject: [dpdk-dev] [PATCH 1/2] net/ena: convert to new Tx offloads API
>>
>> Ethdev Tx offloads API has changed since:
>>
>> commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
>>
>> This commit adds support for the new Tx offloads API. Queue configuration
>> is stored in ena_ring.offloads. While preparing mbufs for Tx, offloads are
>> allowed only if the appropriate flags in this field are set.
>>
>> Signed-off-by: Rafal Kozik <r...@semihalf.com>
>> ---
>>  drivers/net/ena/ena_ethdev.c | 73 +++++++++++++++++++++++++++++++++++---------
>>  drivers/net/ena/ena_ethdev.h |  3 ++
>>  2 files changed, 61 insertions(+), 15 deletions(-)
>>
>> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
>> index 22db895..6473776 100644
>> --- a/drivers/net/ena/ena_ethdev.c
>> +++ b/drivers/net/ena/ena_ethdev.c
>> @@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
>>  #define ENA_STATS_ARRAY_RX  ARRAY_SIZE(ena_stats_rx_strings)
>>  #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
>>
>> +#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
>> +                    DEV_TX_OFFLOAD_UDP_CKSUM |\
>> +                    DEV_TX_OFFLOAD_IPV4_CKSUM |\
>> +                    DEV_TX_OFFLOAD_TCP_TSO)
>> +#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
>> +                   PKT_TX_IP_CKSUM |\
>> +                   PKT_TX_TCP_SEG)
>> +
>>  /** Vendor ID used by Amazon devices */
>>  #define PCI_VENDOR_ID_AMAZON 0x1D0F
>>  /** Amazon devices */
>> @@ -227,6 +235,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
>>                            struct rte_eth_rss_reta_entry64 *reta_conf,
>>                            uint16_t reta_size);
>>  static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
>> +static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
>> +                                              uint64_t offloads);
>>
>>  static const struct eth_dev_ops ena_dev_ops = {
>>      .dev_configure        = ena_dev_configure,
>> @@ -280,21 +290,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
>>  }
>>
>>  static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>> -                                   struct ena_com_tx_ctx *ena_tx_ctx)
>> +                                   struct ena_com_tx_ctx *ena_tx_ctx,
>> +                                   uint64_t queue_offloads)
>>  {
>>      struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
>>
>> -    if (mbuf->ol_flags &
>> -        (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
>> +    if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
>> +        (queue_offloads & QUEUE_OFFLOADS)) {
>>              /* check if TSO is required */
>> -            if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
>> +            if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
>> +                (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
>>                      ena_tx_ctx->tso_enable = true;
>>
>>                      ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
>>              }
>>
>>              /* check if L3 checksum is needed */
>> -            if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
>> +            if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
>> +                (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
>>                      ena_tx_ctx->l3_csum_enable = true;
>>
>>              if (mbuf->ol_flags & PKT_TX_IPV6) {
>> @@ -310,19 +323,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
>>              }
>>
>>              /* check if L4 checksum is needed */
>> -            switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
>> -            case PKT_TX_TCP_CKSUM:
>> +            if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
>> +                (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
>>                      ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
>>                      ena_tx_ctx->l4_csum_enable = true;
>> -                    break;
>> -            case PKT_TX_UDP_CKSUM:
>> +            } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
>> +                       (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
>>                      ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
>>                      ena_tx_ctx->l4_csum_enable = true;
>> -                    break;
>> -            default:
>> +            } else {
>>                      ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
>>                      ena_tx_ctx->l4_csum_enable = false;
>> -                    break;
>>              }
>>
>>              ena_meta->mss = mbuf->tso_segsz;
>> @@ -945,7 +956,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>>                            uint16_t queue_idx,
>>                            uint16_t nb_desc,
>>                            __rte_unused unsigned int socket_id,
>> -                          __rte_unused const struct rte_eth_txconf *tx_conf)
>> +                          const struct rte_eth_txconf *tx_conf)
>>  {
>>      struct ena_com_create_io_ctx ctx =
>>              /* policy set to _HOST just to satisfy icc compiler */
>> @@ -982,6 +993,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
>>              return -EINVAL;
>>      }
>>
>> +    if (!ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
>> +            RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
>> +            return -EINVAL;
>> +    }
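
The definition of ena_are_tx_queue_offloads_allowed() is not part of the
quoted hunks. My understanding is that it only needs to verify the requested
queue offloads are a subset of what the port supports, along the lines of the
sketch below (the adapter field name tx_supported_offloads is my assumption,
not taken from the patch):

static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
                                              uint64_t offloads)
{
        /* Sketch only: the field name tx_supported_offloads is assumed. */
        uint64_t supported = adapter->tx_supported_offloads;

        /* Every offload requested for this queue must be supported. */
        return (offloads & supported) == offloads;
}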
> 
> Here it is better to also check ETH_TXQ_FLAGS_IGNORE.
> If the application has not yet moved to the new API, it won't set any port
> Tx offloads. So for old applications the ena_are_tx_queue_offloads_allowed()
> check is not necessary.
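
If I read the suggestion right, the validation would be skipped for
applications still on the old API, roughly like this (a sketch, assuming the
check stays in ena_tx_queue_setup()):

        /* Validate tx_conf->offloads only when the application signals it
         * uses the new API; legacy callers leave ETH_TXQ_FLAGS_IGNORE unset
         * and describe offloads via txq_flags instead.
         */
        if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
            !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
                RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
                return -EINVAL;
        }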

But the ethdev layer will convert txq_flags into offloads when
ETH_TXQ_FLAGS_IGNORE is missing, so can't the PMD always rely only on
tx_conf->offloads?
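
For reference, this is roughly what I see the ethdev layer doing at Tx queue
setup (a paraphrased sketch, abbreviated to the checksum flags; not the exact
library code):

        struct rte_eth_txconf local_conf = *tx_conf;

        /* When the application does not set ETH_TXQ_FLAGS_IGNORE, the
         * legacy txq_flags are converted into the new offloads field
         * before the PMD ever sees tx_conf.
         */
        if (!(local_conf.txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
                uint64_t offloads = 0;

                if (!(local_conf.txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
                        offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
                if (!(local_conf.txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
                        offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
                /* ... remaining legacy flags map the same way ... */
                local_conf.offloads = offloads;
        }
        /* The PMD's tx_queue_setup() is then called with &local_conf. */

So a PMD that only honors tx_conf->offloads should keep working for old
applications as well.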
