On Thu, 2 Apr 2020 at 09:50, Simran Singhal <singhalsimr...@gmail.com>
wrote:

> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimr...@gmail.com>
> ---
> Changes in v2:
>         -Drop changes in file hw/rdma/rdma_utils.c
>

So I guess we should expect to see the dropped hw/rdma/rdma_utils.c change back soon, right?

Ignore my r-b and t-b for v1; I did not encounter the build errors myself,
and this one is okay too.
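
For anyone following along who has not used the macros yet: QEMU_LOCK_GUARD()
takes the lock and releases it automatically whenever the enclosing scope is
left, early returns included, while WITH_QEMU_LOCK_GUARD() confines the
critical section to the attached block. Below is a minimal standalone sketch
of the scope-guard idea, written against plain pthreads and the GCC/Clang
cleanup attribute rather than the real QemuLockable machinery in
include/qemu/lockable.h; the LOCK_GUARD, unlock_cleanup and set_handler names
are made up for the sketch.

/* Illustrative only: a scope guard in the spirit of QEMU_LOCK_GUARD,
 * built on __attribute__((cleanup)) and a plain pthread mutex.
 * Build with: cc -pthread guard.c */
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* Lock now; the unlock runs automatically when the scope is left. */
#define LOCK_GUARD(m) \
    pthread_mutex_t *guard_ __attribute__((cleanup(unlock_cleanup))) = (m); \
    pthread_mutex_lock(guard_)

static pthread_mutex_t handlers_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_handler(int conn_id)
{
    LOCK_GUARD(&handlers_mutex);

    if (conn_id < 0) {
        return -1;      /* early return: the guard still unlocks */
    }
    return 0;           /* normal return: the guard unlocks here too */
}

int main(void)
{
    set_handler(-1);
    /* trylock succeeding shows the early return did not leak the lock */
    if (pthread_mutex_trylock(&handlers_mutex) == 0) {
        pthread_mutex_unlock(&handlers_mutex);
        printf("guard released the mutex on the early-return path\n");
    }
    return 0;
}

The real macros do the same job for any QemuLockable, which is why the gotos
in the hunks below can simply become returns.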

For the hw/rdma stuff:

Reviewed-by: Yuval Shaia <yuval.shaia...@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia...@gmail.com>

Thanks,
Yuval


>
>  hw/hyperv/hyperv.c     | 15 ++++++-------
>  hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
>  hw/rdma/rdma_rm.c      |  3 +--
>  3 files changed, 33 insertions(+), 35 deletions(-)
>
> diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
> index 8ca3706f5b..4ddafe1de1 100644
> --- a/hw/hyperv/hyperv.c
> +++ b/hw/hyperv/hyperv.c
> @@ -15,6 +15,7 @@
>  #include "sysemu/kvm.h"
>  #include "qemu/bitops.h"
>  #include "qemu/error-report.h"
> +#include "qemu/lockable.h"
>  #include "qemu/queue.h"
>  #include "qemu/rcu.h"
>  #include "qemu/rcu_queue.h"
> @@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
>      int ret;
>      MsgHandler *mh;
>
> -    qemu_mutex_lock(&handlers_mutex);
> +    QEMU_LOCK_GUARD(&handlers_mutex);
>      QLIST_FOREACH(mh, &msg_handlers, link) {
>          if (mh->conn_id == conn_id) {
>              if (handler) {
> @@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
>                  g_free_rcu(mh, rcu);
>                  ret = 0;
>              }
> -            goto unlock;
> +            return ret;
>          }
>      }
>
> @@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
>      } else {
>          ret = -ENOENT;
>      }
> -unlock:
> -    qemu_mutex_unlock(&handlers_mutex);
> +
>      return ret;
>  }
>
> @@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
>      int ret;
>      EventFlagHandler *handler;
>
> -    qemu_mutex_lock(&handlers_mutex);
> +    QEMU_LOCK_GUARD(&handlers_mutex);
>      QLIST_FOREACH(handler, &event_flag_handlers, link) {
>          if (handler->conn_id == conn_id) {
>              if (notifier) {
> @@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
>                  g_free_rcu(handler, rcu);
>                  ret = 0;
>              }
> -            goto unlock;
> +            return ret;
>          }
>      }
>
> @@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
>      } else {
>          ret = -ENOENT;
>      }
> -unlock:
> -    qemu_mutex_unlock(&handlers_mutex);
> +
>      return ret;
>  }
>
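
The hyperv.c hunks above show the payoff most clearly: every return path now
drops handlers_mutex automatically, so the unlock label and the gotos can go
away. For contrast, here is a small standalone pthread sketch (not QEMU code;
buggy_lookup is invented for the example) of the leak that manual unlock
paths make possible and that the guard rules out.

/* Standalone illustration of the bug class the guards remove: an early
 * return that skips the manual unlock leaves the mutex held, and the
 * next caller notices.  Build with: cc -pthread leak.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t handlers_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Manual lock/unlock: the early return forgets the unlock. */
static int buggy_lookup(int conn_id)
{
    pthread_mutex_lock(&handlers_mutex);
    if (conn_id == 42) {
        return 0;                   /* oops: mutex is still held */
    }
    pthread_mutex_unlock(&handlers_mutex);
    return -1;
}

int main(void)
{
    buggy_lookup(42);
    if (pthread_mutex_trylock(&handlers_mutex) != 0) {
        printf("mutex leaked by the early return\n");
    } else {
        pthread_mutex_unlock(&handlers_mutex);
    }
    return 0;
}
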
> diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> index 3dd39fe1a7..db7e5c8be5 100644
> --- a/hw/rdma/rdma_backend.c
> +++ b/hw/rdma/rdma_backend.c
> @@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
>      struct ibv_wc wc[2];
>      RdmaProtectedGSList *cqe_ctx_list;
>
> -    qemu_mutex_lock(&rdma_dev_res->lock);
> -    do {
> -        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
> +    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
> +        do {
> +            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
>
> -        trace_rdma_poll_cq(ne, ibcq);
> +            trace_rdma_poll_cq(ne, ibcq);
>
> -        for (i = 0; i < ne; i++) {
> -            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> -            if (unlikely(!bctx)) {
> -                rdma_error_report("No matching ctx for req %"PRId64,
> -                                  wc[i].wr_id);
> -                continue;
> -            }
> +            for (i = 0; i < ne; i++) {
> +                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> +                if (unlikely(!bctx)) {
> +                    rdma_error_report("No matching ctx for req %"PRId64,
> +                                      wc[i].wr_id);
> +                    continue;
> +                }
>
> -            comp_handler(bctx->up_ctx, &wc[i]);
> +                comp_handler(bctx->up_ctx, &wc[i]);
>
> -            if (bctx->backend_qp) {
> -                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
> -            } else {
> -                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
> -            }
> +                if (bctx->backend_qp) {
> +                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
> +                } else {
> +                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
> +                }
>
> -            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
> -            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> -            g_free(bctx);
> -        }
> -        total_ne += ne;
> -    } while (ne > 0);
> -    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
> -    qemu_mutex_unlock(&rdma_dev_res->lock);
> +                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
> +                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
> +                g_free(bctx);
> +            }
> +            total_ne += ne;
> +        } while (ne > 0);
> +        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
> +    }
>
>      if (ne < 0) {
>          rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
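
The block form in rdma_poll_cq() keeps rdma_dev_res->lock held only for the
do/while loop, so the ibv_poll_cq() error report quoted just above still runs
unlocked, same as before the conversion. A rough standalone sketch of the
block-guard shape, using a one-shot for loop over a pthread mutex; WITH_LOCK
is an invented name, and unlike the real WITH_QEMU_LOCK_GUARD this simplified
version would not unlock if the block were left early.

/* Illustrative only: a WITH_...-style block guard over a plain pthread
 * mutex.  The one-shot for loop locks before its body and unlocks after
 * it; the body runs exactly once.  Build with: cc -pthread with.c */
#include <pthread.h>
#include <stdio.h>

#define WITH_LOCK(m) \
    for (int once_ = (pthread_mutex_lock((m)), 1); once_; \
         once_ = (pthread_mutex_unlock((m)), 0))

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
    int polled = 0;

    WITH_LOCK(&res_lock) {
        polled += 2;                /* critical section: lock held here */
    }
    /* lock already released here, like the error report after the loop */
    printf("polled %d completions\n", polled);
    return 0;
}
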
> diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
> index 7e9ea283c9..60957f88db 100644
> --- a/hw/rdma/rdma_rm.c
> +++ b/hw/rdma/rdma_rm.c
> @@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
>  {
>      trace_rdma_res_tbl_dealloc(tbl->name, handle);
>
> -    qemu_mutex_lock(&tbl->lock);
> +    QEMU_LOCK_GUARD(&tbl->lock);
>
>      if (handle < tbl->tbl_sz) {
>          clear_bit(handle, tbl->bitmap);
>          tbl->used--;
>      }
>
> -    qemu_mutex_unlock(&tbl->lock);
>  }
>
>  int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
> --
> 2.17.1
>
>
