Add code for the event queues (EQs). An AEQ is a queue used to receive
management asynchronous messages and management command response
messages.

Signed-off-by: Ziyang Xuan <xuanziya...@huawei.com>
---
 drivers/net/hinic/base/hinic_ctx_def.h   | 184 ++++++
 drivers/net/hinic/base/hinic_pmd_dpdev.h | 146 +++++
 drivers/net/hinic/base/hinic_pmd_eqs.c   | 725 +++++++++++++++++++++++
 drivers/net/hinic/base/hinic_pmd_eqs.h   |  94 +++
 4 files changed, 1149 insertions(+)
 create mode 100644 drivers/net/hinic/base/hinic_ctx_def.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_dpdev.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.h
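
The headers below lean on one bit-field idiom throughout: each register
field gets a _SHIFT/_MASK pair plus a token-pasting SET() helper that
masks a value before shifting it into place. A minimal standalone sketch
of that idiom (the DEMO_* names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

#define DEMO_INTR_IDX_SHIFT     0
#define DEMO_DMA_ATTR_SHIFT     12

#define DEMO_INTR_IDX_MASK      0x3FFU
#define DEMO_DMA_ATTR_MASK      0x3FU

/* mask first so a field can never overflow into its neighbour */
#define DEMO_SET(val, member) \
        (((val) & DEMO_##member##_MASK) << DEMO_##member##_SHIFT)

int main(void)
{
        /* compose one 32-bit control word from two fields */
        uint32_t ctrl = DEMO_SET(5, INTR_IDX) | DEMO_SET(3, DMA_ATTR);

        printf("ctrl = 0x%x\n", ctrl); /* prints 0x3005 */
        return 0;
}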

diff --git a/drivers/net/hinic/base/hinic_ctx_def.h b/drivers/net/hinic/base/hinic_ctx_def.h
new file mode 100644
index 000000000..ff5151bae
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_ctx_def.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_CTX_DEF_H_
+#define _HINIC_CTX_DEF_H_
+
+#define MASKED_SQ_IDX(sq, idx)                 ((idx) & (sq)->wq->mask)
+
+#define HINIC_Q_CTXT_MAX                       42
+
+/* performance: ci addr is cache-line (64B) aligned */
+#define HINIC_CI_Q_ADDR_SIZE                   (64)
+
+#define CI_TABLE_SIZE(num_qps, pg_sz)  \
+                       (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))
+
+#define HINIC_CI_VADDR(base_addr, q_id)                ((u8 *)(base_addr) + \
+                                               (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define HINIC_CI_PADDR(base_paddr, q_id)       ((base_paddr) + \
+                                               (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define Q_CTXT_SIZE                                    48
+#define TSO_LRO_CTXT_SIZE                              240
+
+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+                       (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+                       + (q_id) * Q_CTXT_SIZE)
+
+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+                       (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+                       + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)
+
+#define SQ_CTXT_SIZE(num_sqs)  ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+                               + (num_sqs) * sizeof(struct hinic_sq_ctxt)))
+
+#define RQ_CTXT_SIZE(num_rqs)  ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+                               + (num_rqs) * sizeof(struct hinic_rq_ctxt)))
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT                  8
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT            13
+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT                      23
+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT                     31
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK                   0x1FU
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK             0x3FFU
+#define SQ_CTXT_CEQ_ATTR_EN_MASK                       0x1U
+#define SQ_CTXT_CEQ_ATTR_ARM_MASK                      0x1U
+
+#define SQ_CTXT_CEQ_ATTR_SET(val, member)              (((val) & \
+                                       SQ_CTXT_CEQ_ATTR_##member##_MASK) \
+                                       << SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define SQ_CTXT_CI_IDX_SHIFT                           11
+#define SQ_CTXT_CI_OWNER_SHIFT                         23
+
+#define SQ_CTXT_CI_IDX_MASK                            0xFFFU
+#define SQ_CTXT_CI_OWNER_MASK                          0x1U
+
+#define SQ_CTXT_CI_SET(val, member)                    (((val) & \
+                                       SQ_CTXT_CI_##member##_MASK) \
+                                       << SQ_CTXT_CI_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT                   0
+#define SQ_CTXT_WQ_PAGE_PI_SHIFT                       20
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK                    0xFFFFFU
+#define SQ_CTXT_WQ_PAGE_PI_MASK                                0xFFFU
+
+#define SQ_CTXT_WQ_PAGE_SET(val, member)               (((val) & \
+                                       SQ_CTXT_WQ_PAGE_##member##_MASK) \
+                                       << SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT             0
+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT                   14
+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT                   25
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK              0x3FFFU
+#define SQ_CTXT_PREF_CACHE_MAX_MASK                    0x7FFU
+#define SQ_CTXT_PREF_CACHE_MIN_MASK                    0x7FU
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT                   0
+#define SQ_CTXT_PREF_CI_SHIFT                          20
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK                    0xFFFFFU
+#define SQ_CTXT_PREF_CI_MASK                           0xFFFU
+
+#define SQ_CTXT_PREF_SET(val, member)                  (((val) & \
+                                       SQ_CTXT_PREF_##member##_MASK) \
+                                       << SQ_CTXT_PREF_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT                  0
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK                   0x7FFFFFU
+
+#define SQ_CTXT_WQ_BLOCK_SET(val, member)      (((val) & \
+                                       SQ_CTXT_WQ_BLOCK_##member##_MASK) \
+                                       << SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT                      0
+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT                   1
+
+#define RQ_CTXT_CEQ_ATTR_EN_MASK                       0x1U
+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK                    0x1U
+
+#define RQ_CTXT_CEQ_ATTR_SET(val, member)              (((val) & \
+                                       RQ_CTXT_CEQ_ATTR_##member##_MASK) \
+                                       << RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define RQ_CTXT_PI_IDX_SHIFT                           0
+#define RQ_CTXT_PI_INTR_SHIFT                          22
+#define RQ_CTXT_PI_CEQ_ARM_SHIFT                       31
+
+#define RQ_CTXT_PI_IDX_MASK                            0xFFFU
+#define RQ_CTXT_PI_INTR_MASK                           0x3FFU
+#define RQ_CTXT_PI_CEQ_ARM_MASK                                0x1U
+
+#define RQ_CTXT_PI_SET(val, member)                    (((val) & \
+                                       RQ_CTXT_PI_##member##_MASK) << \
+                                       RQ_CTXT_PI_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT                   0
+#define RQ_CTXT_WQ_PAGE_CI_SHIFT                       20
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK                    0xFFFFFU
+#define RQ_CTXT_WQ_PAGE_CI_MASK                                0xFFFU
+
+#define RQ_CTXT_WQ_PAGE_SET(val, member)               (((val) & \
+                                       RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+                                       RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT             0
+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT                   14
+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT                   25
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK              0x3FFFU
+#define RQ_CTXT_PREF_CACHE_MAX_MASK                    0x7FFU
+#define RQ_CTXT_PREF_CACHE_MIN_MASK                    0x7FU
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT                   0
+#define RQ_CTXT_PREF_CI_SHIFT                          20
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK                    0xFFFFFU
+#define RQ_CTXT_PREF_CI_MASK                           0xFFFU
+
+#define RQ_CTXT_PREF_SET(val, member)                  (((val) & \
+                                       RQ_CTXT_PREF_##member##_MASK) << \
+                                       RQ_CTXT_PREF_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT                  0
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK                   0x7FFFFFU
+
+#define RQ_CTXT_WQ_BLOCK_SET(val, member)              (((val) & \
+                                       RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+                                       RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define SIZE_16BYTES(size)             (ALIGN((size), 16) >> 4)
+
+#define        WQ_PAGE_PFN_SHIFT                               12
+#define        WQ_BLOCK_PFN_SHIFT                              9
+
+#define WQ_PAGE_PFN(page_addr)         ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr)                ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+enum sq_cflag {
+       CFLAG_DATA_PATH = 0,
+};
+
+enum hinic_qp_ctxt_type {
+       HINIC_QP_CTXT_TYPE_SQ,
+       HINIC_QP_CTXT_TYPE_RQ,
+};
+
+/* service type related define */
+enum cfg_svc_type_en {
+       CFG_SVC_NIC_BIT0    = (1 << 0),
+};
+
+#define IS_NIC_TYPE(dev) \
+       ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)
+
+#endif /* _HINIC_CTX_DEF_H_ */
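
To make the queue context layout encoded by SQ_CTXT_OFFSET/RQ_CTXT_OFFSET
concrete: TSO/LRO contexts for all queues come first, then all SQ
contexts, then all RQ contexts. A standalone arithmetic check (the queue
counts here are made up for illustration):

#include <stdio.h>

#define Q_CTXT_SIZE             48
#define TSO_LRO_CTXT_SIZE       240

/* same formulas as hinic_ctx_def.h */
#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)

#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
        + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)

int main(void)
{
        /* with 16 SQs and 16 RQs: 32 * 240 = 7680 bytes of TSO/LRO
         * contexts, then 16 * 48 bytes of SQ contexts, then the RQs
         */
        printf("sq0 @ %d\n", SQ_CTXT_OFFSET(16, 16, 0)); /* 7680 */
        printf("rq0 @ %d\n", RQ_CTXT_OFFSET(16, 16, 0)); /* 8448 */
        return 0;
}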
diff --git a/drivers/net/hinic/base/hinic_pmd_dpdev.h b/drivers/net/hinic/base/hinic_pmd_dpdev.h
new file mode 100644
index 000000000..dfaec0209
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_dpdev.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_DPDEV_H_
+#define _HINIC_PMD_DPDEV_H_
+
+#include <rte_ethdev.h>
+#include <rte_eth_ctrl.h>
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_ctx_def.h"
+#include "hinic_qe_def.h"
+#include "hinic_port_cmd.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_hw.h"
+#include "hinic_pmd_hw_mgmt.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_nicio.h"
+#include "hinic_pmd_qp.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_nic.h"
+#include "hinic_pmd_niccfg.h"
+#include "hinic_pmd_mgmt_interface.h"
+#include "hinic_pmd_cfg.h"
+#include "hinic_pmd_eqs.h"
+#include "hinic_pmd_api_cmd.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_cmdq.h"
+
+#define HINIC_AEQN_START       (0)
+#define HINIC_AEQN_NUM         (4)
+#define HINIC_MGMT_RSP_AEQN    (1)
+
+#define HINIC_DEV_NAME_LEN     (32)
+
+#define HINIC_MAX_DMA_ENTRIES  (8192)
+
+#define HINIC_MAX_RX_QUEUES    (64)
+
+#define HINIC_MGMT_CMD_UNSUPPORTED     0xFF
+
+/* mbuf pool used to copy invalid mbuf segments */
+#define HINIC_COPY_MEMPOOL_DEPTH (128)
+#define HINIC_COPY_MBUF_SIZE     (4096)
+
+#define HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev) \
+       ((struct hinic_nic_dev *)(dev)->data->dev_private)
+
+enum hinic_dev_status {
+       HINIC_DEV_INIT,
+       HINIC_DEV_CLOSE,
+       HINIC_DEV_START,
+       HINIC_DEV_INTR_EN,
+};
+
+struct hinic_txq;
+struct hinic_rxq;
+
+/* dma os dependency implementation */
+struct hinic_os_dep {
+       /* kernel dma alloc api */
+       rte_atomic32_t dma_alloc_cnt;
+       rte_spinlock_t  dma_hash_lock;
+       struct rte_hash *dma_addr_hash;
+};
+
+/* hinic nic_device */
+struct hinic_nic_dev {
+       u32 link_status;                /* port link status */
+       struct hinic_txq **txqs;
+       struct hinic_rxq **rxqs;
+       struct rte_mempool *cpy_mpool;
+       u16 num_qps;
+       u16 num_sq;
+       u16 num_rq;
+       u16 mtu_size;
+       u8 rss_tmpl_idx;
+       u8 rss_indir_flag;
+       u8 num_rss;
+       u8 rx_queue_list[HINIC_MAX_RX_QUEUES];
+
+       /* hardware hw_dev */
+       struct hinic_hwdev *hwdev;
+       struct hinic_nic_io *nic_io;
+
+       /* dma memory allocator */
+       struct hinic_os_dep dumb_os_dep;
+       struct hinic_os_dep *os_dep;
+
+       /* info */
+       unsigned int flags;
+       struct nic_service_cap nic_cap;
+       u32 rx_mode_status;     /* promisc allmulticast */
+       unsigned long dev_status;
+
+       /* dpdk only */
+       char proc_dev_name[HINIC_DEV_NAME_LEN];
+       /* PF0->COS4, PF1->COS5, PF2->COS6, PF3->COS7,
+        * vf: the same with associate pf
+        */
+       u32 default_cos;
+
+       u32 ffm_num;
+};
+
+int32_t hinic_nic_dev_create(struct rte_eth_dev *rte_dev);
+void hinic_nic_dev_destroy(struct rte_eth_dev *rte_dev);
+
+int hinic_hwif_res_init(struct hinic_nic_dev *nic_dev);
+void hinic_hwif_res_free(struct hinic_nic_dev *nic_dev);
+
+int hinic_init_nicio(struct hinic_nic_dev *nic_dev);
+void hinic_deinit_nicio(struct hinic_nic_dev *nic_dev);
+
+int hinic_comm_aeqs_init(struct hinic_nic_dev *nic_dev);
+void hinic_comm_aeqs_free(struct hinic_nic_dev *nic_dev);
+
+int hinic_comm_pf_to_mgmt_init(struct hinic_nic_dev *nic_dev);
+void hinic_comm_pf_to_mgmt_free(struct hinic_nic_dev *nic_dev);
+
+int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev);
+void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev);
+
+int hinic_init_capability(struct hinic_nic_dev *nic_dev);
+
+int hinic_create_rq(struct hinic_nic_dev *nic_dev, u16 q_id, u16 rq_depth);
+void hinic_destroy_rq(struct hinic_nic_dev *nic_dev, u16 q_id);
+
+int hinic_create_sq(struct hinic_nic_dev *nic_dev, u16 q_id, u16 sq_depth);
+void hinic_destroy_sq(struct hinic_nic_dev *nic_dev, u16 q_id);
+
+void hinic_lsc_process(struct rte_eth_dev *rte_dev, u8 status);
+
+void *hinic_dma_mem_zalloc(void *dev, size_t size, dma_addr_t *dma_handle,
+               unsigned int flag, unsigned int align);
+void hinic_dma_mem_free(void *dev, size_t size, void *virt, dma_addr_t phys);
+
+int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev);
+void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev);
+
+void dma_free_coherent_volatile(void *dev, size_t size,
+       volatile void *virt, dma_addr_t phys);
+
+#endif /* _HINIC_PMD_DPDEV_H_ */
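
As a sketch of how an ethdev callback would typically recover the private
device via HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(); the hinic_dev_configure()
callback shown here is hypothetical and not part of this patch, and
assumes the driver headers above are included:

static int hinic_dev_configure(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev =
                HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        /* queue counts come from the ethdev configuration */
        nic_dev->num_sq = dev->data->nb_tx_queues;
        nic_dev->num_rq = dev->data->nb_rx_queues;
        nic_dev->num_qps = RTE_MIN(nic_dev->num_sq, nic_dev->num_rq);

        return 0;
}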
diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.c b/drivers/net/hinic/base/hinic_pmd_eqs.c
new file mode 100644
index 000000000..db175a776
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_eqs.c
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT              0
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT              12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT          20
+#define AEQ_CTRL_0_INTR_MODE_SHIFT             31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK               0x3FFU
+#define AEQ_CTRL_0_DMA_ATTR_MASK               0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK           0x3U
+#define AEQ_CTRL_0_INTR_MODE_MASK              0x1U
+
+#define AEQ_CTRL_0_SET(val, member)            \
+                               (((val) & AEQ_CTRL_0_##member##_MASK) << \
+                               AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member)          \
+                               ((val) & (~(AEQ_CTRL_0_##member##_MASK \
+                                       << AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT                   0
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT             24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT             28
+
+#define AEQ_CTRL_1_LEN_MASK                    0x1FFFFFU
+#define AEQ_CTRL_1_ELEM_SIZE_MASK              0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK              0xFU
+
+#define AEQ_CTRL_1_SET(val, member)            \
+                               (((val) & AEQ_CTRL_1_##member##_MASK) << \
+                               AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member)          \
+                               ((val) & (~(AEQ_CTRL_1_##member##_MASK \
+                                       << AEQ_CTRL_1_##member##_SHIFT)))
+
+#define CEQ_CTRL_0_INTR_IDX_SHIFT              0
+#define CEQ_CTRL_0_DMA_ATTR_SHIFT              12
+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT            20
+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT          24
+#define CEQ_CTRL_0_INTR_MODE_SHIFT             31
+
+#define CEQ_CTRL_0_INTR_IDX_MASK               0x3FFU
+#define CEQ_CTRL_0_DMA_ATTR_MASK               0x3FU
+#define CEQ_CTRL_0_LIMIT_KICK_MASK             0xFU
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK           0x3U
+#define CEQ_CTRL_0_INTR_MODE_MASK              0x1U
+
+#define CEQ_CTRL_0_SET(val, member)            \
+                               (((val) & CEQ_CTRL_0_##member##_MASK) << \
+                                       CEQ_CTRL_0_##member##_SHIFT)
+
+#define CEQ_CTRL_1_LEN_SHIFT                   0
+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT             28
+
+#define CEQ_CTRL_1_LEN_MASK                    0x1FFFFFU
+#define CEQ_CTRL_1_PAGE_SIZE_MASK              0xFU
+
+#define CEQ_CTRL_1_SET(val, member)            \
+                               (((val) & CEQ_CTRL_1_##member##_MASK) << \
+                                       CEQ_CTRL_1_##member##_SHIFT)
+
+#define EQ_ELEM_DESC_TYPE_SHIFT                        0
+#define EQ_ELEM_DESC_SRC_SHIFT                 7
+#define EQ_ELEM_DESC_SIZE_SHIFT                        8
+#define EQ_ELEM_DESC_WRAPPED_SHIFT             31
+
+#define EQ_ELEM_DESC_TYPE_MASK                 0x7FU
+#define EQ_ELEM_DESC_SRC_MASK                  0x1U
+#define EQ_ELEM_DESC_SIZE_MASK                 0xFFU
+#define EQ_ELEM_DESC_WRAPPED_MASK              0x1U
+
+#define EQ_ELEM_DESC_GET(val, member)          \
+                               (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
+                               EQ_ELEM_DESC_##member##_MASK)
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT             0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT           24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT            31
+
+#define EQ_CONS_IDX_CONS_IDX_MASK              0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK            0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK             0x1U
+
+#define EQ_CONS_IDX_SET(val, member)           \
+                               (((val) & EQ_CONS_IDX_##member##_MASK) << \
+                               EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member)         \
+                               ((val) & (~(EQ_CONS_IDX_##member##_MASK \
+                                       << EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq)                 ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq)                ((eq)->cons_idx | \
+                               ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq)       (((eq)->type == HINIC_AEQ) ? \
+                               HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\
+                               HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq)       (((eq)->type == HINIC_AEQ) ? \
+                               HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\
+                               HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size)             \
+               ((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \
+               / (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size)  ((pg_size) / (u32)(eq)->elem_size)
+
+#define GET_EQ_ELEMENT(eq, idx)                \
+               (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+               (((u32)(idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx)          ((struct hinic_aeq_elem *) \
+                                       GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx)          ((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CURR_AEQ_ELEM(eq)          GET_AEQ_ELEM((eq), (eq)->cons_idx)
+
+#define PAGE_IN_4K(page_size)          ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq)                (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq)    ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT                   0
+#define CEQ_DMA_ATTR_DEFAULT                   0
+
+#define CEQ_LMT_KICK_DEFAULT                   0
+
+#define EQ_WRAPPED_SHIFT                       20
+
+#define        EQ_VALID_SHIFT                          31
+
+#define aeq_to_aeqs(eq) \
+               container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+       u8 checksum = 0;
+       u8 idx;
+
+       for (idx = 0; idx < 32; idx += 4)
+               checksum ^= ((val >> idx) & 0xF);
+
+       return (checksum & 0xF);
+}
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: whether to report an interrupt when a new eq element arrives
+ **/
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+       u32 eq_cons_idx, eq_wrap_ci, val;
+       u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+       eq_wrap_ci = EQ_CONS_IDX(eq);
+
+       /* Read Modify Write */
+       val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);
+
+       val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
+               EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
+               EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);
+
+       /* Only aeq0 uses interrupt-armed mode, so the pmd driver can
+        * receive asynchronous events and mailbox data
+        */
+       if (eq->q_id == 0)
+               eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+                       EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+       else
+               eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+                       EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);
+
+       val |= eq_cons_idx;
+
+       val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+       hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * eq_update_ci - update the cons idx of event queue
+ * @eq: the event queue to update the cons idx for
+ **/
+static void eq_update_ci(struct hinic_eq *eq)
+{
+       set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+}
+
+struct hinic_ceq_ctrl_reg {
+       struct hinic_mgmt_msg_head mgmt_msg_head;
+
+       u16 func_id;
+       u16 q_id;
+       u32 ctrl0;
+       u32 ctrl1;
+};
+
+static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
+                           u32 ctrl0, u32 ctrl1)
+{
+       struct hinic_ceq_ctrl_reg ceq_ctrl;
+       u16 in_size = sizeof(ceq_ctrl);
+
+       memset(&ceq_ctrl, 0, in_size);
+       ceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       ceq_ctrl.func_id = hinic_global_func_id(hwdev);
+       ceq_ctrl.q_id = q_id;
+       ceq_ctrl.ctrl0 = ctrl0;
+       ceq_ctrl.ctrl1 = ctrl1;
+
+       return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+                                    HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
+                                    &ceq_ctrl, in_size, NULL, NULL, 0);
+}
+
+/**
+ * set_eq_ctrls - set the eq's control registers
+ * @eq: the event queue to configure
+ **/
+static int set_eq_ctrls(struct hinic_eq *eq)
+{
+       enum hinic_eq_type type = eq->type;
+       struct hinic_hwif *hwif = eq->hwdev->hwif;
+       struct irq_info *eq_irq = &eq->eq_irq;
+       u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+       u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
+       int ret = 0;
+
+       if (type == HINIC_AEQ) {
+               /* set ctrl0 */
+               addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+               val = hinic_hwif_read_reg(hwif, addr);
+
+               val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+                       AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+                       AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+                       AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+               ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+                       AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)  |
+                       AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)      |
+                       AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+               val |= ctrl0;
+
+               hinic_hwif_write_reg(hwif, addr, val);
+
+               /* set ctrl1 */
+               addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+               page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+               elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+               ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)         |
+                       AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)    |
+                       AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+               hinic_hwif_write_reg(hwif, addr, ctrl1);
+       } else {
+               ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+                       CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR)  |
+                       CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
+                       CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)      |
+                       CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+               page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+               ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
+                       CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+               /* set ceq ctrl reg through mgmt cpu */
+               ret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
+       }
+
+       return ret;
+}
+
+/**
+ * ceq_elements_init - Initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: value with which to initialize the elements
+ **/
+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+       u16 i;
+       u32 *ceqe;
+
+       for (i = 0; i < eq->eq_len; i++) {
+               ceqe = GET_CEQ_ELEM(eq, i);
+               *(ceqe) = cpu_to_be32(init_val);
+       }
+
+       rte_wmb();      /* Write the init values */
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value with which to initialize the elements
+ **/
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+       struct hinic_aeq_elem *aeqe;
+       u16 i;
+
+       for (i = 0; i < eq->eq_len; i++) {
+               aeqe = GET_AEQ_ELEM(eq, i);
+               aeqe->desc = cpu_to_be32(init_val);
+       }
+
+       rte_wmb();      /* Write the init values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ **/
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+       struct hinic_hwif *hwif = eq->hwdev->hwif;
+       u32 init_val;
+       u64 dma_addr_size, virt_addr_size;
+       u16 pg_num, i;
+       int err;
+
+       dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+       virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+       eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+       if (!eq->dma_addr) {
+               PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
+               return -ENOMEM;
+       }
+
+       eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+       if (!eq->virt_addr) {
+               PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
+               err = -ENOMEM;
+               goto virt_addr_alloc_err;
+       }
+
+       for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+               eq->virt_addr[pg_num] =
+                       (u8 *)dma_zalloc_coherent_aligned(eq->hwdev->dev_hdl,
+                                       eq->page_size, &eq->dma_addr[pg_num],
+                                       GFP_KERNEL);
+               if (!eq->virt_addr[pg_num]) {
+                       err = -ENOMEM;
+                       goto dma_alloc_err;
+               }
+
+               hinic_hwif_write_reg(hwif,
+                                    HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
+                                    eq->q_id, pg_num),
+                                    upper_32_bits(eq->dma_addr[pg_num]));
+
+               hinic_hwif_write_reg(hwif,
+                                    HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
+                                    eq->q_id, pg_num),
+                                    lower_32_bits(eq->dma_addr[pg_num]));
+       }
+
+       init_val = EQ_WRAPPED(eq);
+
+       if (eq->type == HINIC_AEQ)
+               aeq_elements_init(eq, init_val);
+       else
+               ceq_elements_init(eq, init_val);
+
+       return 0;
+
+dma_alloc_err:
+       for (i = 0; i < pg_num; i++)
+               dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size,
+                                 eq->virt_addr[i], eq->dma_addr[i]);
+
+virt_addr_alloc_err:
+       kfree(eq->dma_addr);
+       return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ **/
+static void free_eq_pages(struct hinic_eq *eq)
+{
+       struct hinic_hwdev *hwdev = eq->hwdev;
+       u16 pg_num;
+
+       for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
+               dma_free_coherent(hwdev->dev_hdl, eq->page_size,
+                                 eq->virt_addr[pg_num],
+                                 eq->dma_addr[pg_num]);
+
+       kfree(eq->virt_addr);
+       kfree(eq->dma_addr);
+}
+
+#define MSIX_ENTRY_IDX_0 (0)
+
+/**
+ * init_eq - initialize eq
+ * @eq:        the event queue
+ * @hwdev: the pointer to the private hardware device object
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, ceq or aeq
+ * @page_size: the page size of the event queue
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ **/
+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+                  u16 q_len, enum hinic_eq_type type, u32 page_size,
+                  __rte_unused struct irq_info *entry)
+{
+       int err = 0;
+
+       eq->hwdev = hwdev;
+       eq->q_id = q_id;
+       eq->type = type;
+       eq->page_size = page_size;
+       eq->eq_len = q_len;
+
+       /* clear eq_len to force eqe drop in hardware */
+       if (eq->type == HINIC_AEQ) {
+               hinic_hwif_write_reg(eq->hwdev->hwif,
+                                    HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+       } else {
+               err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Set ceq control registers ctrl0[0] 
ctrl1[0] failed");
+                       return err;
+               }
+       }
+
+       eq->cons_idx = 0;
+       eq->wrapped = 0;
+
+       eq->elem_size = (type == HINIC_AEQ) ?
+                       HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+       eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
+       eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);
+
+       if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+               PMD_DRV_LOG(ERR, "Number element in eq page is not power of 2");
+               return -EINVAL;
+       }
+
+       if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+               PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
+                       eq->num_pages);
+               return -EINVAL;
+       }
+
+       err = alloc_eq_pages(eq);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
+               return err;
+       }
+
+       /* the pmd uses MSIX_ENTRY_IDX_0 */
+       eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;
+
+       err = set_eq_ctrls(eq);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Init eq control registers failed");
+               goto init_eq_ctrls_err;
+       }
+
+       hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+       set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+       if (eq->q_id == 0)
+               hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);
+
+       eq->poll_retry_nr = HINIC_RETRY_NUM;
+
+       return 0;
+
+init_eq_ctrls_err:
+       free_eq_pages(eq);
+
+       return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq:        the event queue
+ **/
+static void remove_eq(struct hinic_eq *eq)
+{
+       struct irq_info *entry = &eq->eq_irq;
+
+       if (eq->type == HINIC_AEQ) {
+               if (eq->q_id == 0)
+                       hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+                                            HINIC_MSIX_DISABLE);
+
+               /* clear eq_len to prevent hw from accessing host memory */
+               hinic_hwif_write_reg(eq->hwdev->hwif,
+                                    HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+       } else {
+               (void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+       }
+
+       /* update cons_idx to avoid invalid interrupt */
+       eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
+                                               EQ_PROD_IDX_REG_ADDR(eq));
+       set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+       free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ * @num_aeqs: number of aeqs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+static int
+hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+               struct irq_info *msix_entries)
+{
+       struct hinic_aeqs *aeqs;
+       int err;
+       u16 i, q_id;
+
+       aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+       if (!aeqs)
+               return -ENOMEM;
+
+       hwdev->aeqs = aeqs;
+       aeqs->hwdev = hwdev;
+       aeqs->num_aeqs = num_aeqs;
+
+       for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
+               err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+                             HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ,
+                             HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
+                       goto init_aeq_err;
+               }
+       }
+
+       return 0;
+
+init_aeq_err:
+       for (i = 0; i < q_id; i++)
+               remove_eq(&aeqs->aeq[i]);
+
+       kfree(aeqs);
+
+       return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ **/
+static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
+{
+       struct hinic_aeqs *aeqs = hwdev->aeqs;
+       u16 q_id;
+
+       /* free every aeq the pmd initialized, from HINIC_AEQN_START on */
+       for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs; q_id++)
+               remove_eq(&aeqs->aeq[q_id]);
+
+       kfree(aeqs);
+}
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+       struct hinic_eq *eq;
+       u32 addr, ci, pi;
+       int q_id;
+
+       for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+               eq = &hwdev->aeqs->aeq[q_id];
+               addr = EQ_CONS_IDX_REG_ADDR(eq);
+               ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+               addr = EQ_PROD_IDX_REG_ADDR(eq);
+               pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+               PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
+                       q_id, ci, pi);
+       }
+}
+
+static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,
+                     u8 *data, u8 size, void *param)
+{
+       int rc = 0;
+
+       switch (event) {
+       case HINIC_MSG_FROM_MGMT_CPU:
+               rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d",
+                           event, size);
+               rc = HINIC_RECV_NEXT_AEQE;
+               break;
+       }
+
+       return rc;
+}
+
+/**
+ * hinic_aeq_poll_msg - poll one or more aeqes and call the dedicated handler
+ * @eq: aeq of the chip
+ * @timeout: 0   - poll all aeqes currently in the eq, used in interrupt mode,
+ *           > 0 - poll the aeq until an aeqe with the 'last' field set to 1
+ *           is received, used in polling mode.
+ * @param: customized parameter
+ * Return: 0 - Success, -EIO - poll timeout, -ENODEV - sw event not supported
+ **/
+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)
+{
+       struct hinic_aeq_elem *aeqe_pos;
+       enum hinic_aeq_type event;
+       u32 aeqe_desc = 0;
+       u16 i;
+       u8 size;
+       int done = HINIC_ERROR;
+       int err = -EFAULT;
+       unsigned long end;
+
+       for (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||
+            ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {
+               err = -EIO;
+               end = jiffies + msecs_to_jiffies(timeout);
+               do {
+                       aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+                       rte_rmb();
+
+                       /* Data in HW is in big-endian format */
+                       aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+                       /* HW updates wrapped bit,
+                        * when it adds eq element event
+                        */
+                       if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)
+                           != eq->wrapped) {
+                               err = 0;
+                               break;
+                       }
+
+                       if (timeout != 0)
+                               rte_delay_ms(1);
+               } while (time_before(jiffies, end));
+
+               if (err != HINIC_OK) /* poll timed out */
+                       break;
+
+               event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+               if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+                       PMD_DRV_LOG(ERR, "AEQ sw event not support %d",
+                               event);
+                       return -ENODEV;
+
+               } else {
+                       size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+                       done = hinic_handle_aeqe(eq->hwdev, event,
+                                                aeqe_pos->aeqe_data,
+                                                size, param);
+               }
+
+               eq->cons_idx++;
+               if (eq->cons_idx == eq->eq_len) {
+                       eq->cons_idx = 0;
+                       eq->wrapped = !eq->wrapped;
+               }
+       }
+
+       eq_update_ci(eq);
+
+       return err;
+}
+
+/**
+ * hinic_comm_aeqs_init - init all the aeqs used by the pmd
+ * @nic_dev: pmd nic device
+ * Return: 0 - Success, Negative - failure
+ **/
+int hinic_comm_aeqs_init(struct hinic_nic_dev *nic_dev)
+{
+       int rc;
+       u16 num_aeqs;
+       struct irq_info aeq_irqs[HINIC_MAX_AEQS];
+
+       num_aeqs = HINIC_HWIF_NUM_AEQS(nic_dev->hwdev->hwif);
+       if (num_aeqs < HINIC_MAX_AEQS) {
+               PMD_DRV_LOG(ERR, "Warning: PMD need %d AEQs, Chip have %d",
+                       HINIC_MAX_AEQS, num_aeqs);
+               return HINIC_ERROR;
+       }
+
+       memset(aeq_irqs, 0, sizeof(aeq_irqs));
+       rc = hinic_aeqs_init(nic_dev->hwdev, num_aeqs, aeq_irqs);
+       if (rc != HINIC_OK)
+               PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);
+
+       return rc;
+}
+
+void hinic_comm_aeqs_free(struct hinic_nic_dev *nic_dev)
+{
+       hinic_aeqs_free(nic_dev->hwdev);
+}
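
As a usage sketch (not from this patch): the mgmt channel could wait for
a command response on AEQ1 in polling mode by passing a non-zero timeout,
which makes hinic_aeq_poll_msg() spin until the aeqe handler reports
completion. The 5000 ms budget below is illustrative, not a driver
constant:

static int wait_mgmt_resp(struct hinic_hwdev *hwdev, void *resp_param)
{
        struct hinic_eq *resp_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];

        return hinic_aeq_poll_msg(resp_aeq, 5000, resp_param);
}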
diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.h b/drivers/net/hinic/base/hinic_pmd_eqs.h
new file mode 100644
index 000000000..73efb3ce6
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_eqs.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_EQS_H_
+#define _HINIC_PMD_EQS_H_
+
+#define HINIC_EQ_PAGE_SIZE             0x00001000
+
+#define HINIC_MAX_AEQS                 4
+
+#define HINIC_EQ_MAX_PAGES             8
+
+#define HINIC_AEQE_SIZE                        64
+#define HINIC_CEQE_SIZE                        4
+
+#define HINIC_AEQE_DESC_SIZE           4
+#define HINIC_AEQE_DATA_SIZE           \
+                       (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN          64
+
+#define        HINIC_CEQ_ID_CMDQ               0
+
+enum hinic_eq_type {
+       HINIC_AEQ,
+       HINIC_CEQ
+};
+
+enum hinic_eq_intr_mode {
+       HINIC_INTR_MODE_ARMED,
+       HINIC_INTR_MODE_ALWAYS,
+};
+
+enum hinic_eq_ci_arm_state {
+       HINIC_EQ_NOT_ARMED,
+       HINIC_EQ_ARMED,
+};
+
+enum hinic_aeq_type {
+       HINIC_HW_INTER_INT = 0,
+       HINIC_MBX_FROM_FUNC = 1,
+       HINIC_MSG_FROM_MGMT_CPU = 2,
+       HINIC_API_RSP = 3,
+       HINIC_API_CHAIN_STS = 4,
+       HINIC_MBX_SEND_RSLT = 5,
+       HINIC_MAX_AEQ_EVENTS
+};
+
+/* When polling consecutive aeqes, the event handler must return done
+ * to indicate whether data reception has finished
+ */
+typedef int (*hinic_aeq_event_cb)(void *hw_dev, u8 *data, u8 size);
+#define HINIC_RETRY_NUM        (10)
+
+struct hinic_eq {
+       struct hinic_hwdev              *hwdev;
+       u16                             q_id;
+       enum hinic_eq_type              type;
+       u32                             page_size;
+       u16                             eq_len;
+
+       u16                             cons_idx;
+       u16                             wrapped;
+
+       u16                             elem_size;
+       u16                             num_pages;
+       u32                             num_elem_in_pg;
+
+       struct irq_info                 eq_irq;
+
+       dma_addr_t                      *dma_addr;
+       u8                              **virt_addr;
+
+       u16                             poll_retry_nr;
+};
+
+struct hinic_aeq_elem {
+       u8      aeqe_data[HINIC_AEQE_DATA_SIZE];
+       u32     desc;
+};
+
+struct hinic_aeqs {
+       struct hinic_hwdev      *hwdev;
+       u16                     poll_retry_nr;
+
+       struct hinic_eq         aeq[HINIC_MAX_AEQS];
+       u16                     num_aeqs;
+};
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param);
+
+#endif /* _HINIC_PMD_EQS_H_ */
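
For reference, the 4-bit XOR checksum that set_eq_cons_idx() folds into
the consumer-index register can be checked standalone; this sketch
duplicates eq_cons_idx_checksum_set() from hinic_pmd_eqs.c:

#include <stdint.h>
#include <stdio.h>

/* XOR all eight nibbles of the register value; hardware uses the
 * result to sanity-check the consumer-index write
 */
static uint8_t xor_chksum(uint32_t val)
{
        uint8_t checksum = 0;
        uint8_t idx;

        for (idx = 0; idx < 32; idx += 4)
                checksum ^= (val >> idx) & 0xF;

        return checksum & 0xF;
}

int main(void)
{
        uint32_t val = 0x80000005; /* INT_ARMED bit set, cons_idx = 5 */

        printf("chksum = 0x%x\n", xor_chksum(val)); /* 0x8 ^ 0x5 = 0xD */
        return 0;
}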
-- 
2.18.0
