The CQE carries all the packet metadata reported by the HW.
Make it available to the driver's XDP handlers.

Signed-off-by: Saeed Mahameed <sae...@mellanox.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h    |  9 ++++++---
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 15 +++++++++------
 2 files changed, 15 insertions(+), 9 deletions(-)
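
Illustration (not part of this patch): with the CQE plumbed into
mlx5e_xdp_handle(), an XDP-side helper could read per-packet metadata
straight from the hardware completion. The sketch below assumes the
struct mlx5_cqe64 layout from include/linux/mlx5/device.h
(rss_hash_result, timestamp_h/timestamp_l); the mlx5e_xdp_cqe_md
container and the mlx5e_xdp_fill_cqe_md() helper are hypothetical
names used only for illustration.

#include <linux/mlx5/device.h>

/* Hypothetical container for CQE metadata exposed to the XDP path. */
struct mlx5e_xdp_cqe_md {
	u32 rss_hash;	/* RSS hash result computed by the HW */
	u64 hw_ts_raw;	/* raw free-running HW timestamp (device clock cycles) */
};

static inline void mlx5e_xdp_fill_cqe_md(struct mlx5_cqe64 *cqe,
					 struct mlx5e_xdp_cqe_md *md)
{
	md->rss_hash  = be32_to_cpu(cqe->rss_hash_result);
	/* Same hi/lo combination as the existing get_cqe_ts() helper. */
	md->hw_ts_raw = (u64)be32_to_cpu(cqe->timestamp_l) |
			((u64)be32_to_cpu(cqe->timestamp_h) << 32);
}

A handler like mlx5e_xdp_handle() could fill such a struct from the cqe
argument before running the BPF program.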

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5893acfae307..98bb315fc8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -505,7 +505,8 @@ struct mlx5e_rq;
 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
 typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                              u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                              u16 cqe_bcnt, u32 head_offset, u32 page_idx,
+                              struct mlx5_cqe64 *cqe);
 typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
@@ -901,10 +902,12 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                               u16 cqe_bcnt, u32 head_offset, u32 page_idx,
+                               struct mlx5_cqe64 *cqe);
 struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx,
+                                  struct mlx5_cqe64 *cqe);
 struct sk_buff *
 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d12577c17011..e37f9747a0e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -923,7 +923,8 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 /* returns true if packet was consumed by xdp */
 static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
                                    struct mlx5e_dma_info *di,
-                                   void *va, u16 *rx_headroom, u32 *len)
+                                   void *va, u16 *rx_headroom,
+                                   u32 *len, struct mlx5_cqe64 *cqe)
 {
        struct bpf_prog *prog = READ_ONCE(rq->xdp.prog);
        struct xdp_buff xdp;
@@ -1012,7 +1013,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        }
 
        rcu_read_lock();
-       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
+       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, cqe);
        rcu_read_unlock();
        if (consumed)
                return NULL; /* page/packet was consumed by XDP */
@@ -1155,7 +1156,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+                                  u16 cqe_bcnt, u32 head_offset, u32 page_idx,
+                                  struct mlx5_cqe64 *cqe)
 {
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
@@ -1202,7 +1204,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 
 struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
-                               u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+                               u16 cqe_bcnt, u32 head_offset, u32 page_idx,
+                               struct mlx5_cqe64 *cqe)
 {
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
@@ -1221,7 +1224,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
        prefetch(data);
 
        rcu_read_lock();
-       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
+       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, cqe);
        rcu_read_unlock();
        if (consumed) {
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1268,7 +1271,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 
        skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
-                                          page_idx);
+                                          page_idx, cqe);
        if (!skb)
                goto mpwrq_cqe_out;
 
-- 
2.17.0
