RE: [[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer message with page payload

2017-08-14 Thread Long Li


> -Original Message-
> From: Tom Talpey
> Sent: Monday, August 14, 2017 1:23 PM
> To: Long Li ; Steve French ;
> linux-c...@vger.kernel.org; samba-techni...@lists.samba.org; linux-
> ker...@vger.kernel.org
> Subject: RE: [[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer
> message with page payload
> 
> > -Original Message-
> > From: linux-cifs-ow...@vger.kernel.org [mailto:linux-cifs-
> > ow...@vger.kernel.org] On Behalf Of Long Li
> > Sent: Wednesday, August 2, 2017 4:10 PM
> > To: Steve French ; linux-c...@vger.kernel.org;
> > samba-techni...@lists.samba.org; linux-kernel@vger.kernel.org
> > Cc: Long Li 
> > Subject: [[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer
> > message with page payload
> >
> >  /*
> > + * Send a page
> > + * page: the page to send
> > + * offset: offset in the page to send
> > + * size: length in the page to send
> > + * remaining_data_length: remaining data to send in this payload
> > + */
> > +static int cifs_rdma_post_send_page(struct cifs_rdma_info *info, struct page *page,
> > +   unsigned long offset, size_t size, int remaining_data_length)
> > +{
> >...
> > +   wait_event(info->wait_send_queue, atomic_read(&info->send_credits) > 0);
> 
> This is an uninterruptible wait, correct? What's to guarantee the event will
> ever fire? Also, if the count is zero, there should be a check that an SMB
> Direct credit request is outstanding. If not, it's wasteful to sleep until
> the keepalive timer eventually requests one.

Will fix it.

> 
> Tom.
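
To make the concern above concrete, here is a minimal sketch of one way the
credit wait could be restructured. This is not the actual fix; the flag
credit_request_outstanding and the helper send_credit_request() are
hypothetical, standing in for whatever mechanism the transport uses to ask the
peer for more send credits:

	static int wait_for_send_credit(struct cifs_rdma_info *info)
	{
		int rc;

		while (atomic_read(&info->send_credits) <= 0) {
			/*
			 * If no credit request is currently outstanding, ask
			 * the peer for credits now rather than sleeping until
			 * the keepalive timer eventually does it. Both the
			 * flag and the helper below are hypothetical.
			 */
			if (!info->credit_request_outstanding)
				send_credit_request(info);

			/* Interruptible, so a signal can break the wait. */
			rc = wait_event_interruptible(info->wait_send_queue,
					atomic_read(&info->send_credits) > 0);
			if (rc)
				return rc;	/* -ERESTARTSYS on signal */
		}
		return 0;
	}

With something along these lines, a sender blocked on zero credits knows a
credit request is in flight, and a signal (for example at unmount) can still
break the wait instead of sleeping uninterruptibly.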


RE: [[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer message with page payload

2017-08-14 Thread Tom Talpey
> -Original Message-
> From: linux-cifs-ow...@vger.kernel.org [mailto:linux-cifs-
> ow...@vger.kernel.org] On Behalf Of Long Li
> Sent: Wednesday, August 2, 2017 4:10 PM
> To: Steve French ; linux-c...@vger.kernel.org; samba-
> techni...@lists.samba.org; linux-kernel@vger.kernel.org
> Cc: Long Li 
> Subject: [[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer message
> with page payload
> 
>  /*
> + * Send a page
> + * page: the page to send
> + * offset: offset in the page to send
> + * size: length in the page to send
> + * remaining_data_length: remaining data to send in this payload
> + */
> +static int cifs_rdma_post_send_page(struct cifs_rdma_info *info, struct page *page,
> +   unsigned long offset, size_t size, int remaining_data_length)
> +{
>...
> +   wait_event(info->wait_send_queue, atomic_read(&info->send_credits) > 0);

This is an uninterruptible wait, correct? What's to guarantee the event will
ever fire? Also, if the count is zero, there should be a check that an SMB
Direct credit request is outstanding. If not, it's wasteful to sleep until
the keepalive timer eventually requests one.

Tom.


[[PATCH v1] 14/37] [CIFS] SMBD: Post a SMBD data transfer message with page payload

2017-08-02 Thread Long Li
From: Long Li 

Add a function to send an SMBD data transfer message to the server with a page
payload passed in from the upper layer.
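
For reference, the data transfer header this function fills in follows
[MS-SMBD] 2.2.3. The sketch below is not the exact struct definition used by
the series (that is introduced in an earlier patch), but it shows the field
order and why data_offset is 24: a 20-byte fixed header plus 4 bytes of
padding up to the next 8-byte boundary:

	/* SMBD data transfer message header, per [MS-SMBD] 2.2.3 (sketch;
	 * the real definition lives elsewhere in the series). */
	struct smbd_data_transfer {
		__le16 credits_requested;	/* send credits asked of the peer */
		__le16 credits_granted;		/* send credits granted to the peer */
		__le16 flags;
		__le16 reserved;
		__le32 remaining_data_length;	/* bytes of this payload still to come */
		__le32 data_offset;		/* offset of data from start of message: 24 */
		__le32 data_length;		/* bytes of data in this message */
		__le32 padding;			/* pads the 20-byte header to 24 bytes */
		__u8   buffer[];		/* payload (the page contents) follows */
	} __packed;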

Signed-off-by: Long Li 
---
 fs/cifs/cifsrdma.c | 113 +
 1 file changed, 113 insertions(+)

diff --git a/fs/cifs/cifsrdma.c b/fs/cifs/cifsrdma.c
index aa3d1a5..b3ec109 100644
--- a/fs/cifs/cifsrdma.c
+++ b/fs/cifs/cifsrdma.c
@@ -66,6 +66,10 @@ static int cifs_rdma_post_recv(
struct cifs_rdma_info *info,
struct cifs_rdma_response *response);
 
+static int cifs_rdma_post_send_page(struct cifs_rdma_info *info,
+   struct page *page, unsigned long offset,
+   size_t size, int remaining_data_length);
+
 /*
  * Per RDMA transport connection parameters
  * as defined in [MS-SMBD] 3.1.1.1
@@ -558,6 +562,115 @@ static int cifs_rdma_post_send_negotiate_req(struct cifs_rdma_info *info)
 }
 
 /*
+ * Send a page
+ * page: the page to send
+ * offset: offset in the page to send
+ * size: length in the page to send
+ * remaining_data_length: remaining data to send in this payload
+ */
+static int cifs_rdma_post_send_page(struct cifs_rdma_info *info, struct page *page,
+   unsigned long offset, size_t size, int remaining_data_length)
+{
+   struct cifs_rdma_request *request;
+   struct smbd_data_transfer *packet;
+   struct ib_send_wr send_wr, *send_wr_fail;
+   int rc = -ENOMEM;
+   int i;
+
+   request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+   if (!request)
+   return rc;
+
+   request->info = info;
+
+   wait_event(info->wait_send_queue, atomic_read(&info->send_credits) > 0);
+   atomic_dec(&info->send_credits);
+
+   packet = (struct smbd_data_transfer *) request->packet;
+   packet->credits_requested = cpu_to_le16(info->send_credit_target);
+   packet->flags = cpu_to_le16(0);
+
+   packet->reserved = cpu_to_le16(0);
+   packet->data_offset = cpu_to_le32(24);
+   packet->data_length = cpu_to_le32(size);
+   packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+
+   packet->padding = cpu_to_le32(0);
+
+   log_outgoing("credits_requested=%d credits_granted=%d data_offset=%d "
+"data_length=%d remaining_data_length=%d\n",
+   le16_to_cpu(packet->credits_requested),
+   le16_to_cpu(packet->credits_granted),
+   le32_to_cpu(packet->data_offset),
+   le32_to_cpu(packet->data_length),
+   le32_to_cpu(packet->remaining_data_length));
+
+   request->sge = kzalloc(sizeof(struct ib_sge)*2, GFP_KERNEL);
+   if (!request->sge)
+   goto allocate_sge_failed;
+   request->num_sge = 2;
+
+   request->sge[0].addr = ib_dma_map_single(info->id->device,
+(void *)packet,
+sizeof(*packet),
+DMA_BIDIRECTIONAL);
+   if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+   rc = -EIO;
+   goto dma_mapping_failed;
+   }
+   request->sge[0].length = sizeof(*packet);
+   request->sge[0].lkey = info->pd->local_dma_lkey;
+   ib_dma_sync_single_for_device(info->id->device, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+   request->sge[1].addr = ib_dma_map_page(info->id->device, page,
+  offset, size, DMA_BIDIRECTIONAL);
+   if (ib_dma_mapping_error(info->id->device, request->sge[1].addr)) {
+   rc = -EIO;
+   goto dma_mapping_failed;
+   }
+   request->sge[1].length = size;
+   request->sge[1].lkey = info->pd->local_dma_lkey;
+   ib_dma_sync_single_for_device(info->id->device, request->sge[1].addr,
+ request->sge[1].length, DMA_TO_DEVICE);
+
+   log_rdma_send("rdma_request sge[0] addr=%llu legnth=%u lkey=%u sge[1] "
+ "addr=%llu length=%u lkey=%u\n",
+   request->sge[0].addr, request->sge[0].length,
+   request->sge[0].lkey, request->sge[1].addr,
+   request->sge[1].length, request->sge[1].lkey);
+
+   request->cqe.done = send_done;
+
+   send_wr.next = NULL;
+   send_wr.wr_cqe = &request->cqe;
+   send_wr.sg_list = request->sge;
+   send_wr.num_sge = request->num_sge;
+   send_wr.opcode = IB_WR_SEND;
+   send_wr.send_flags = IB_SEND_SIGNALED;
+
+   rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+   if (!rc)
+   return 0;
+
+   // post send failed
+   log_rdma_send("ib_post_send failed rc=%d\n", rc);
+
+dma_mapping_failed:
+   for (i=0; i<2; i++)
+   if (request->sge[i].addr)
+   ib_dma_unmap_single(info->id->device,
+