On 29/10/2020 09:27, Thomas Monjalon wrote:
> The mempool pointer in the mbuf struct is moved
> from the second to the first half.
> It should increase performance on most systems having 64-byte cache line,
> i.e. mbuf is split in two cache lines.
> On such system, the first half (also called first cache line) is hotter
> than the second one where the pool pointer was.
> 
> Moving this field gives more space to dynfield1.
> 
> This is how the mbuf layout looks like (pahole-style):
> 
> word  type                              name                byte  size
>  0    void *                            buf_addr;         /*   0 +  8 */
>  1    rte_iova_t                        buf_iova          /*   8 +  8 */
>       /* --- RTE_MARKER64               rearm_data;                   */
>  2    uint16_t                          data_off;         /*  16 +  2 */
>       uint16_t                          refcnt;           /*  18 +  2 */
>       uint16_t                          nb_segs;          /*  20 +  2 */
>       uint16_t                          port;             /*  22 +  2 */
>  3    uint64_t                          ol_flags;         /*  24 +  8 */
>       /* --- RTE_MARKER                 rx_descriptor_fields1;        */
>  4    uint32_t             union        packet_type;      /*  32 +  4 */
>       uint32_t                          pkt_len;          /*  36 +  4 */
>  5    uint16_t                          data_len;         /*  40 +  2 */
>       uint16_t                          vlan_tci;         /*  42 +  2 */
>  5.5  uint64_t             union        hash;             /*  44 +  8 */
>  6.5  uint16_t                          vlan_tci_outer;   /*  52 +  2 */
>       uint16_t                          buf_len;          /*  54 +  2 */
>  7    struct rte_mempool *              pool;             /*  56 +  8 */
>       /* --- RTE_MARKER                 cacheline1;                   */
>  8    struct rte_mbuf *                 next;             /*  64 +  8 */
>  9    uint64_t             union        tx_offload;       /*  72 +  8 */
> 10    uint16_t                          priv_size;        /*  80 +  2 */
>       uint16_t                          timesync;         /*  82 +  2 */
>       uint32_t                          seqn;             /*  84 +  4 */
> 11    struct rte_mbuf_ext_shared_info * shinfo;           /*  88 +  8 */
> 12    uint64_t                          dynfield1[4];     /*  96 + 32 */
> 16    /* --- END                                             128      */
> 
> Signed-off-by: Thomas Monjalon <tho...@monjalon.net>
> ---
>  doc/guides/rel_notes/deprecation.rst | 5 -----
>  lib/librte_kni/rte_kni_common.h      | 3 ++-
>  lib/librte_mbuf/rte_mbuf_core.h      | 5 ++---
>  3 files changed, 4 insertions(+), 9 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst 
> b/doc/guides/rel_notes/deprecation.rst
> index 72dbb25b83..07ca1dcbb2 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -88,11 +88,6 @@ Deprecation Notices
>  
>    - ``seqn``
>  
> -  As a consequence, the layout of the ``struct rte_mbuf`` will be 
> re-arranged,
> -  avoiding impact on vectorized implementation of the driver datapaths,
> -  while evaluating performance gains of a better use of the first cache line.
> -
> -
>  * ethdev: the legacy filter API, including
>    ``rte_eth_dev_filter_supported()``, ``rte_eth_dev_filter_ctrl()`` as well
>    as filter types MACVLAN, ETHERTYPE, FLEXIBLE, SYN, NTUPLE, TUNNEL, FDIR,
> diff --git a/lib/librte_kni/rte_kni_common.h b/lib/librte_kni/rte_kni_common.h
> index 36d66e2ffa..ffb3182731 100644
> --- a/lib/librte_kni/rte_kni_common.h
> +++ b/lib/librte_kni/rte_kni_common.h
> @@ -84,10 +84,11 @@ struct rte_kni_mbuf {
>       char pad2[4];
>       uint32_t pkt_len;       /**< Total pkt len: sum of all segment 
> data_len. */
>       uint16_t data_len;      /**< Amount of data in segment buffer. */
> +     char pad3[14];
> +     void *pool;
>  
>       /* fields on second cache line */
>       __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE)))
> -     void *pool;
>       void *next;             /**< Physical address of next mbuf in kernel. */
>  };
>  
> diff --git a/lib/librte_mbuf/rte_mbuf_core.h b/lib/librte_mbuf/rte_mbuf_core.h
> index 52ca1c842f..ee185fa32b 100644
> --- a/lib/librte_mbuf/rte_mbuf_core.h
> +++ b/lib/librte_mbuf/rte_mbuf_core.h
> @@ -584,12 +584,11 @@ struct rte_mbuf {
>  
>       uint16_t buf_len;         /**< Length of segment buffer. */
>  
> -     uint64_t unused;
> +     struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
>  
>       /* second cache line - fields only used in slow path or on TX */
>       RTE_MARKER cacheline1 __rte_cache_min_aligned;
>  
> -     struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
>       struct rte_mbuf *next;    /**< Next segment of scattered packet. */
>  
>       /* fields to support TX offloads */
> @@ -646,7 +645,7 @@ struct rte_mbuf {
>        */
>       struct rte_mbuf_ext_shared_info *shinfo;
>  
> -     uint64_t dynfield1[3]; /**< Reserved for dynamic fields. */
> +     uint64_t dynfield1[4]; /**< Reserved for dynamic fields. */
>  } __rte_cache_aligned;
>  
>  /**
> 

I will let others chime in on the merits of the cache-line positioning of the 
mempool pointer. 

From the ABI PoV, the deprecation notice has been observed, and since mbuf affects 
everything, doing this outside of an ABI breakage window is impossible — so it's now 
or never.

Ray K


Reply via email to