Re: [Qemu-devel] [PATCH v3 4/4] hw/pvrdma: Add support for SRQ

2019-04-03 Thread Yuval Shaia
On Wed, Apr 03, 2019 at 02:33:43PM +0300, Kamal Heib wrote:
> Implement the pvrdma device commands for supporting SRQ
> 
> Signed-off-by: Kamal Heib 
> ---
>  hw/rdma/vmw/pvrdma_cmd.c| 147 
>  hw/rdma/vmw/pvrdma_main.c   |  16 
>  hw/rdma/vmw/pvrdma_qp_ops.c |  46 ++-
>  hw/rdma/vmw/pvrdma_qp_ops.h |   1 +
>  4 files changed, 209 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
> index b931bb6dc9d4..8d70c0d23de4 100644
> --- a/hw/rdma/vmw/pvrdma_cmd.c
> +++ b/hw/rdma/vmw/pvrdma_cmd.c
> @@ -609,6 +609,149 @@ static int destroy_uc(PVRDMADev *dev, union 
> pvrdma_cmd_req *req,
>  return 0;
>  }
>  
> +static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
> +   uint64_t pdir_dma, uint32_t max_wr,
> +   uint32_t max_sge, uint32_t nchunks)
> +{
> +uint64_t *dir = NULL, *tbl = NULL;
> +PvrdmaRing *r;
> +int rc = -EINVAL;
> +char ring_name[MAX_RING_NAME_SZ];
> +uint32_t wqe_sz;
> +
> +if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
> +rdma_error_report("Got invalid page count for SRQ ring: %d",
> +  nchunks);
> +return rc;
> +}
> +
> +dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
> +if (!dir) {
> +rdma_error_report("Failed to map to SRQ page directory");
> +goto out;
> +}
> +
> +tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
> +if (!tbl) {
> +rdma_error_report("Failed to map to SRQ page table");
> +goto out;
> +}
> +
> +r = g_malloc(sizeof(*r));
> +*ring = r;
> +
> +r->ring_state = (struct pvrdma_ring *)
> +rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
> +if (!r->ring_state) {
> +rdma_error_report("Failed to map to SRQ ring state");
> +goto out_free_ring_mem;
> +}
> +
> +wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
> +  sizeof(struct pvrdma_sge) * max_sge - 1);
> +sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
> +rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
> +  wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
> +if (rc) {
> +goto out_unmap_ring_state;
> +}
> +
> +goto out;
> +
> +out_unmap_ring_state:
> +rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);
> +
> +out_free_ring_mem:
> +g_free(r);
> +
> +out:
> +rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
> +rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
> +
> +return rc;
> +}
> +
> +static void destroy_srq_ring(PvrdmaRing *ring)
> +{
> +pvrdma_ring_free(ring);
> +rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
> +g_free(ring);
> +}
> +
> +static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
> +  union pvrdma_cmd_resp *rsp)
> +{
> +struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
> +struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
> +PvrdmaRing *ring = NULL;
> +int rc;
> +
> +memset(resp, 0, sizeof(*resp));
> +
> +rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
> + cmd->attrs.max_wr, cmd->attrs.max_sge,
> + cmd->nchunks);
> +if (rc) {
> +return rc;
> +}
> +
> +rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
> +   cmd->attrs.max_wr, cmd->attrs.max_sge,
> +   cmd->attrs.srq_limit, &resp->srqn, ring);
> +if (rc) {
> +destroy_srq_ring(ring);
> +return rc;
> +}
> +
> +return 0;
> +}
> +
> +static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
> + union pvrdma_cmd_resp *rsp)
> +{
> +struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
> +struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;
> +
> +memset(resp, 0, sizeof(*resp));
> +
> +return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
> + (struct ibv_srq_attr *)&resp->attrs);
> +}
> +
> +static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
> +  union pvrdma_cmd_resp *rsp)
> +{
> +struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;
> +
> +/* Only support SRQ limit */
> +if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
> +(cmd->attr_mask & IBV_SRQ_MAX_WR))
> +return -EINVAL;
> +
> +return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
> +  (struct ibv_srq_attr *)&cmd->attrs,
> +  cmd->attr_mask);
> +}
> +
> +static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
> +   union pvrdma_cmd_resp *rsp)
> +{
> +struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
> +RdmaRmSRQ *srq;

[Qemu-devel] [PATCH v3 4/4] hw/pvrdma: Add support for SRQ

2019-04-03 Thread Kamal Heib
Implement the pvrdma device commands for supporting SRQ

Signed-off-by: Kamal Heib 
---
 hw/rdma/vmw/pvrdma_cmd.c| 147 
 hw/rdma/vmw/pvrdma_main.c   |  16 
 hw/rdma/vmw/pvrdma_qp_ops.c |  46 ++-
 hw/rdma/vmw/pvrdma_qp_ops.h |   1 +
 4 files changed, 209 insertions(+), 1 deletion(-)

diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
index b931bb6dc9d4..8d70c0d23de4 100644
--- a/hw/rdma/vmw/pvrdma_cmd.c
+++ b/hw/rdma/vmw/pvrdma_cmd.c
@@ -609,6 +609,149 @@ static int destroy_uc(PVRDMADev *dev, union 
pvrdma_cmd_req *req,
 return 0;
 }
 
+static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
+   uint64_t pdir_dma, uint32_t max_wr,
+   uint32_t max_sge, uint32_t nchunks)
+{
+uint64_t *dir = NULL, *tbl = NULL;
+PvrdmaRing *r;
+int rc = -EINVAL;
+char ring_name[MAX_RING_NAME_SZ];
+uint32_t wqe_sz;
+
+if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
+rdma_error_report("Got invalid page count for SRQ ring: %d",
+  nchunks);
+return rc;
+}
+
+dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
+if (!dir) {
+rdma_error_report("Failed to map to SRQ page directory");
+goto out;
+}
+
+tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
+if (!tbl) {
+rdma_error_report("Failed to map to SRQ page table");
+goto out;
+}
+
+r = g_malloc(sizeof(*r));
+*ring = r;
+
+r->ring_state = (struct pvrdma_ring *)
+rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+if (!r->ring_state) {
+rdma_error_report("Failed to map to SRQ ring state");
+goto out_free_ring_mem;
+}
+
+wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
+  sizeof(struct pvrdma_sge) * max_sge - 1);
+sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
+rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
+  wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
+if (rc) {
+goto out_unmap_ring_state;
+}
+
+goto out;
+
+out_unmap_ring_state:
+rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);
+
+out_free_ring_mem:
+g_free(r);
+
+out:
+rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
+rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
+
+return rc;
+}
+
+static void destroy_srq_ring(PvrdmaRing *ring)
+{
+pvrdma_ring_free(ring);
+rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
+g_free(ring);
+}
+
+static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+  union pvrdma_cmd_resp *rsp)
+{
+struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
+struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
+PvrdmaRing *ring = NULL;
+int rc;
+
+memset(resp, 0, sizeof(*resp));
+
+rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
+ cmd->attrs.max_wr, cmd->attrs.max_sge,
+ cmd->nchunks);
+if (rc) {
+return rc;
+}
+
+rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
+   cmd->attrs.max_wr, cmd->attrs.max_sge,
+   cmd->attrs.srq_limit, &resp->srqn, ring);
+if (rc) {
+destroy_srq_ring(ring);
+return rc;
+}
+
+return 0;
+}
+
+static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp)
+{
+struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
+struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;
+
+memset(resp, 0, sizeof(*resp));
+
+return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
+ (struct ibv_srq_attr *)&resp->attrs);
+}
+
+static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+  union pvrdma_cmd_resp *rsp)
+{
+struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;
+
+/* Only support SRQ limit */
+if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
+(cmd->attr_mask & IBV_SRQ_MAX_WR))
+return -EINVAL;
+
+return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
+  (struct ibv_srq_attr *)&cmd->attrs,
+  cmd->attr_mask);
+}
+
+static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
+   union pvrdma_cmd_resp *rsp)
+{
+struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
+RdmaRmSRQ *srq;
+PvrdmaRing *ring;
+
+srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
+if (!srq) {
+return -EINVAL;
+}
+
+ring = (PvrdmaRing *)srq->opaque;
+destroy_srq_ring(ring);
+rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);
+
+return 0;
+}
+
 struct cmd_handler {
 uint32_t cmd;
 uint32_t ack;
@