On Thu,  2 Aug 2018 04:37:31 +0200
Dominique Martinet <asmad...@codewreck.org> wrote:

> From: Dominique Martinet <dominique.marti...@cea.fr>
> 
> 'msize' is often a power of two, or at least page-aligned, so avoiding
> an overhead of two dozen bytes for each allocation will help the
> allocator do its work and reduce memory fragmentation.
> 
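(To make the size-class point concrete, here is a rough sketch, not the actual
patch: before this series the fcall header and its data buffer came from a
single allocation, so a power-of-two msize spilled into the next kmalloc size
class; with the header embedded in struct p9_req_t only the buffer itself is
allocated and it fits its natural slab.)

	/* before (sketch): header and buffer carved from one allocation */
	fc = kmalloc(sizeof(struct p9_fcall) + msize, GFP_NOFS);
	fc->sdata = (char *)fc + sizeof(struct p9_fcall);

	/* after (sketch): fcall embedded in p9_req_t, only the buffer is allocated */
	fc->sdata = kmalloc(msize, GFP_NOFS);
	fc->capacity = msize;
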
> Suggested-by: Matthew Wilcox <wi...@infradead.org>
> Signed-off-by: Dominique Martinet <dominique.marti...@cea.fr>
> Cc: Matthew Wilcox <wi...@infradead.org>
> Cc: Greg Kurz <gr...@kaod.org>
> Cc: Jun Piao <piao...@huawei.com>
> ---
> 
> v2:
>  - Add extra label to not free uninitialized memory on alloc failure
>  - Rename p9_fcall_alloc to 9p_fcall_init
>  - Add a p9_fcall_fini function to echo to init
> 
[...]
> diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
> index 2ab4574183c9..c5cac97df7f7 100644
> --- a/net/9p/trans_rdma.c
> +++ b/net/9p/trans_rdma.c
> @@ -122,7 +122,7 @@ struct p9_rdma_context {
>       dma_addr_t busa;
>       union {
>               struct p9_req_t *req;
> -             struct p9_fcall *rc;
> +             struct p9_fcall rc;
>       };
>  };
>  
> @@ -320,8 +320,8 @@ recv_done(struct ib_cq *cq, struct ib_wc *wc)
>       if (wc->status != IB_WC_SUCCESS)
>               goto err_out;
>  
> -     c->rc->size = wc->byte_len;
> -     err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
> +     c->rc.size = wc->byte_len;
> +     err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
>       if (err)
>               goto err_out;
>  
> @@ -331,12 +331,13 @@ recv_done(struct ib_cq *cq, struct ib_wc *wc)
>  
>       /* Check that we have not yet received a reply for this request.
>        */
> -     if (unlikely(req->rc)) {
> +     if (unlikely(req->rc.sdata)) {
>               pr_err("Duplicate reply for request %d", tag);
>               goto err_out;
>       }
>  
> -     req->rc = c->rc;
> +     req->rc.size = c->rc.size;
> +     req->rc.sdata = c->rc.sdata;
>       p9_client_cb(client, req, REQ_STATUS_RCVD);
>  
>   out:
> @@ -361,7 +362,7 @@ send_done(struct ib_cq *cq, struct ib_wc *wc)
>               container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
>  
>       ib_dma_unmap_single(rdma->cm_id->device,
> -                         c->busa, c->req->tc->size,
> +                         c->busa, c->req->tc.size,
>                           DMA_TO_DEVICE);
>       up(&rdma->sq_sem);
>       kfree(c);
> @@ -401,7 +402,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
>       struct ib_sge sge;
>  
>       c->busa = ib_dma_map_single(rdma->cm_id->device,
> -                                 c->rc->sdata, client->msize,
> +                                 c->rc.sdata, client->msize,
>                                   DMA_FROM_DEVICE);
>       if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
>               goto error;
> @@ -443,9 +444,9 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
>        **/
>       if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
>               if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
> -                     /* Got one ! */
> -                     kfree(req->rc);
> -                     req->rc = NULL;
> +                     /* Got one! */
> +                     kfree(req->rc.sdata);

Shouldn't this be p9_fcall_fini(&req->rc)?
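
For reference, with the helper that hunk would presumably end up reading
something like this (a sketch only, assuming p9_fcall_fini() is the small
kfree(fc->sdata) wrapper introduced earlier in the patch):

			/* Got one! */
			p9_fcall_fini(&req->rc);
			req->rc.sdata = NULL;
			goto dont_need_post_recv;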

The rest looks good, so with that fixed, you can add:

Reviewed-by: Greg Kurz <gr...@kaod.org>

> +                     req->rc.sdata = NULL;
>                       goto dont_need_post_recv;
>               } else {
>                       /* We raced and lost. */
