Author: hselasky Date: Mon Nov 16 10:10:53 2020 New Revision: 367718 URL: https://svnweb.freebsd.org/changeset/base/367718
Log: Report EQE data upon CQ completion in mlx5core. Report EQE data upon CQ completion to let upper layers use this data. Linux commit: 4e0e2ea1886afe8c001971ff767f6670312a9b04 MFC after: 1 week Sponsored by: Mellanox Technologies // NVIDIA Networking Modified: head/sys/dev/mlx5/cq.h head/sys/dev/mlx5/driver.h head/sys/dev/mlx5/mlx5_core/mlx5_cq.c head/sys/dev/mlx5/mlx5_core/mlx5_eq.c head/sys/dev/mlx5/mlx5_en/en.h head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c head/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c head/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c Modified: head/sys/dev/mlx5/cq.h ============================================================================== --- head/sys/dev/mlx5/cq.h Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/cq.h Mon Nov 16 10:10:53 2020 (r367718) @@ -32,7 +32,7 @@ #include <dev/mlx5/driver.h> #include <dev/mlx5/mlx5_ifc.h> - +struct mlx5_eqe; struct mlx5_core_cq { u32 cqn; int cqe_sz; @@ -40,7 +40,7 @@ struct mlx5_core_cq { __be32 *arm_db; unsigned vector; int irqn; - void (*comp) (struct mlx5_core_cq *); + void (*comp) (struct mlx5_core_cq *, struct mlx5_eqe *); void (*event) (struct mlx5_core_cq *, int); struct mlx5_uar *uar; u32 cons_index; Modified: head/sys/dev/mlx5/driver.h ============================================================================== --- head/sys/dev/mlx5/driver.h Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/driver.h Mon Nov 16 10:10:53 2020 (r367718) @@ -1021,7 +1021,7 @@ void mlx5_unregister_debugfs(void); int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); +void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct 
mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); Modified: head/sys/dev/mlx5/mlx5_core/mlx5_cq.c ============================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_cq.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_core/mlx5_cq.c Mon Nov 16 10:10:53 2020 (r367718) @@ -55,13 +55,16 @@ mlx5_cq_table_write_unlock(struct mlx5_cq_table *table NET_EPOCH_WAIT(); } -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { struct mlx5_cq_table *table = &dev->priv.cq_table; struct mlx5_core_cq *cq; struct epoch_tracker et; + u32 cqn; bool do_lock; + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + NET_EPOCH_ENTER(et); do_lock = atomic_read(&table->writercount) != 0; @@ -78,7 +81,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 if (likely(cq != NULL)) { ++cq->arm_sn; - cq->comp(cq); + cq->comp(cq, eqe); } else { mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); Modified: head/sys/dev/mlx5/mlx5_core/mlx5_eq.c ============================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_eq.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_core/mlx5_eq.c Mon Nov 16 10:10:53 2020 (r367718) @@ -246,8 +246,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, stru eq->eqn, eqe_type_str(eqe->type)); switch (eqe->type) { case MLX5_EVENT_TYPE_COMP: - cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; - mlx5_cq_completion(dev, cqn); + mlx5_cq_completion(dev, eqe); break; case MLX5_EVENT_TYPE_PATH_MIG: Modified: head/sys/dev/mlx5/mlx5_en/en.h ============================================================================== --- head/sys/dev/mlx5/mlx5_en/en.h Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_en/en.h Mon Nov 16 10:10:53 2020 (r367718) @@ -149,7 +149,7 @@ 
MALLOC_DECLARE(M_MLX5EN); struct mlx5_core_dev; struct mlx5e_cq; -typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *); +typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *, struct mlx5_eqe *); #define mlx5_en_err(_dev, format, ...) \ if_printf(_dev, "ERR: ""%s:%d:(pid %d): " format, \ @@ -1107,8 +1107,8 @@ int mlx5e_open_locked(struct ifnet *); int mlx5e_close_locked(struct ifnet *); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event); -void mlx5e_rx_cq_comp(struct mlx5_core_cq *); -void mlx5e_tx_cq_comp(struct mlx5_core_cq *); +mlx5e_cq_comp_t mlx5e_rx_cq_comp; +mlx5e_cq_comp_t mlx5e_tx_cq_comp; struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_dim_work(struct work_struct *); Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c ============================================================================== --- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Mon Nov 16 10:10:53 2020 (r367718) @@ -1898,7 +1898,7 @@ mlx5e_drain_sq(struct mlx5e_sq *sq) mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); - sq->cq.mcq.comp(&sq->cq.mcq); + sq->cq.mcq.comp(&sq->cq.mcq, NULL); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); @@ -1916,7 +1916,7 @@ mlx5e_drain_sq(struct mlx5e_sq *sq) mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); - sq->cq.mcq.comp(&sq->cq.mcq); + sq->cq.mcq.comp(&sq->cq.mcq, NULL); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); @@ -2229,7 +2229,7 @@ mlx5e_open_channel(struct mlx5e_priv *priv, /* poll receive queue initially */ NET_EPOCH_ENTER(et); - c->rq.cq.mcq.comp(&c->rq.cq.mcq); + c->rq.cq.mcq.comp(&c->rq.cq.mcq, NULL); NET_EPOCH_EXIT(et); return (0); @@ -3805,7 +3805,7 @@ mlx5e_disable_rx_dma(struct mlx5e_channel *ch) while (!mlx5_wq_ll_is_empty(&rq->wq)) { msleep(1); NET_EPOCH_ENTER(et); - rq->cq.mcq.comp(&rq->cq.mcq); + rq->cq.mcq.comp(&rq->cq.mcq, NULL); NET_EPOCH_EXIT(et); } @@ -3838,7 
+3838,7 @@ mlx5e_enable_rx_dma(struct mlx5e_channel *ch) rq->enabled = 1; NET_EPOCH_ENTER(et); - rq->cq.mcq.comp(&rq->cq.mcq); + rq->cq.mcq.comp(&rq->cq.mcq, NULL); NET_EPOCH_EXIT(et); } Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c ============================================================================== --- head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c Mon Nov 16 10:10:53 2020 (r367718) @@ -232,7 +232,7 @@ mlx5e_rl_open_channel(struct mlx5e_rl_worker *rlw, int *ppsq = sq; /* poll TX queue initially */ - sq->cq.mcq.comp(&sq->cq.mcq); + sq->cq.mcq.comp(&sq->cq.mcq, NULL); return (0); Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c ============================================================================== --- head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c Mon Nov 16 10:10:53 2020 (r367718) @@ -537,7 +537,7 @@ wq_ll_pop: } void -mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq) +mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused) { struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq); int i = 0; Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c ============================================================================== --- head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c Mon Nov 16 10:10:53 2020 (r367718) @@ -871,7 +871,7 @@ select_queue: } void -mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq) +mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused) { struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq); Modified: head/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c ============================================================================== --- head/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c Mon Nov 16 10:10:53 2020 (r367718) @@ -31,7 
+31,7 @@ #include <rdma/ib_cache.h> #include "mlx5_ib.h" -static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) +static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe __unused) { struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; Modified: head/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c ============================================================================== --- head/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c Mon Nov 16 10:06:10 2020 (r367717) +++ head/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c Mon Nov 16 10:10:53 2020 (r367718) @@ -2459,7 +2459,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ * lock/unlock above locks Now need to arm all involved CQs. */ list_for_each_entry(mcq, &cq_armed_list, reset_notify) { - mcq->comp(mcq); + mcq->comp(mcq, NULL); } spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); } _______________________________________________ svn-src-head@freebsd.org mailing list https://lists.freebsd.org/mailman/listinfo/svn-src-head To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"