Re: [Patch v4 1/3] CIFS: Add support for direct I/O read

2018-11-29 Thread Pavel Shilovsky
Thu, Nov 29, 2018 at 14:48, Long Li :
>
> > Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> >
> > Wed, Nov 28, 2018 at 15:43, Long Li :
> > >
> > > > Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> > > >
> > > > Hi Long,
> > > >
> > > > Please find my comments below.
> > > >
> > > >
> > > > Wed, Oct 31, 2018 at 15:14, Long Li :
> > > > >
> > > > > From: Long Li 
> > > > >
> > > > > With direct I/O read, we transfer the data directly from transport
> > > > > layer to the user data buffer.
> > > > >
> > > > > Change in v3: add support for kernel AIO
> > > > >
> > > > > Change in v4:
> > > > > Refactor common read code to __cifs_readv for direct and non-direct
> > I/O.
> > > > > Retry on direct I/O failure.
> > > > >
> > > > > Signed-off-by: Long Li 
> > > > > ---
> > > > >  fs/cifs/cifsfs.h   |   1 +
> > > > >  fs/cifs/cifsglob.h |   5 ++
> > > > >  fs/cifs/file.c | 219 +++--
> > > > >  3 files changed, 186 insertions(+), 39 deletions(-)
> > > > >
> > > > > diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index
> > > > > 5f02318..7fba9aa 100644
> > > > > --- a/fs/cifs/cifsfs.h
> > > > > +++ b/fs/cifs/cifsfs.h
> > > > > @@ -102,6 +102,7 @@ extern int cifs_open(struct inode *inode,
> > > > > struct file *file);  extern int cifs_close(struct inode *inode,
> > > > > struct file *file);  extern int cifs_closedir(struct inode *inode,
> > > > > struct file *file);  extern ssize_t cifs_user_readv(struct kiocb
> > > > > *iocb, struct iov_iter *to);
> > > > > +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct
> > > > > +iov_iter *to);
> > > > >  extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct
> > > > > iov_iter *to);  extern ssize_t cifs_user_writev(struct kiocb
> > > > > *iocb, struct iov_iter *from);  extern ssize_t
> > > > > cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
> > > > > diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index
> > > > > 7f62c98..52248dd 100644
> > > > > --- a/fs/cifs/cifsglob.h
> > > > > +++ b/fs/cifs/cifsglob.h
> > > > > @@ -1146,6 +1146,11 @@ struct cifs_aio_ctx {
> > > > > unsigned intlen;
> > > > > unsigned inttotal_len;
> > > > > boolshould_dirty;
> > > > > +   /*
> > > > > +* Indicates if this aio_ctx is for direct_io,
> > > > > +* If yes, iter is a copy of the user passed iov_iter
> > > > > +*/
> > > > > +   booldirect_io;
> > > > >  };
> > > > >
> > > > >  struct cifs_readdata;
> > > > > diff --git a/fs/cifs/file.c b/fs/cifs/file.c index
> > > > > 87eece6..daab878
> > > > > 100644
> > > > > --- a/fs/cifs/file.c
> > > > > +++ b/fs/cifs/file.c
> > > > > @@ -2965,7 +2965,6 @@ cifs_uncached_readdata_release(struct kref
> > > > *refcount)
> > > > > kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
> > > > > for (i = 0; i < rdata->nr_pages; i++) {
> > > > > put_page(rdata->pages[i]);
> > > > > -   rdata->pages[i] = NULL;
> > > > > }
> > > > > cifs_readdata_release(refcount);  } @@ -3092,6 +3091,63 @@
> > > > > cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
> > > > > return uncached_fill_pages(server, rdata, iter,
> > > > > iter->count); }
> > > > >
> > > > > +static int cifs_resend_rdata(struct cifs_readdata *rdata,
> > > > > + struct list_head *rdata_list,
> > > > > + struct cifs_aio_ctx *ctx) {
> > > > > +   int wait_retry = 0;
> > > > > +   unsigned int rsize, credits;
> > > > > +   int rc;
> > > > > +   struct 

RE: [Patch v4 1/3] CIFS: Add support for direct I/O read

2018-11-29 Thread Long Li
> Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> 
> Wed, Nov 28, 2018 at 15:43, Long Li :
> >
> > > Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> > >
> > > Hi Long,
> > >
> > > Please find my comments below.
> > >
> > >
> > > Wed, Oct 31, 2018 at 15:14, Long Li :
> > > >
> > > > From: Long Li 
> > > >
> > > > With direct I/O read, we transfer the data directly from transport
> > > > layer to the user data buffer.
> > > >
> > > > Change in v3: add support for kernel AIO
> > > >
> > > > Change in v4:
> > > > Refactor common read code to __cifs_readv for direct and non-direct
> I/O.
> > > > Retry on direct I/O failure.
> > > >
> > > > Signed-off-by: Long Li 
> > > > ---
> > > >  fs/cifs/cifsfs.h   |   1 +
> > > >  fs/cifs/cifsglob.h |   5 ++
> > > >  fs/cifs/file.c | 219 +++--
> > > >  3 files changed, 186 insertions(+), 39 deletions(-)
> > > >
> > > > diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index
> > > > 5f02318..7fba9aa 100644
> > > > --- a/fs/cifs/cifsfs.h
> > > > +++ b/fs/cifs/cifsfs.h
> > > > @@ -102,6 +102,7 @@ extern int cifs_open(struct inode *inode,
> > > > struct file *file);  extern int cifs_close(struct inode *inode,
> > > > struct file *file);  extern int cifs_closedir(struct inode *inode,
> > > > struct file *file);  extern ssize_t cifs_user_readv(struct kiocb
> > > > *iocb, struct iov_iter *to);
> > > > +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct
> > > > +iov_iter *to);
> > > >  extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct
> > > > iov_iter *to);  extern ssize_t cifs_user_writev(struct kiocb
> > > > *iocb, struct iov_iter *from);  extern ssize_t
> > > > cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
> > > > diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index
> > > > 7f62c98..52248dd 100644
> > > > --- a/fs/cifs/cifsglob.h
> > > > +++ b/fs/cifs/cifsglob.h
> > > > @@ -1146,6 +1146,11 @@ struct cifs_aio_ctx {
> > > > unsigned intlen;
> > > > unsigned inttotal_len;
> > > > boolshould_dirty;
> > > > +   /*
> > > > +* Indicates if this aio_ctx is for direct_io,
> > > > +* If yes, iter is a copy of the user passed iov_iter
> > > > +*/
> > > > +   booldirect_io;
> > > >  };
> > > >
> > > >  struct cifs_readdata;
> > > > diff --git a/fs/cifs/file.c b/fs/cifs/file.c index
> > > > 87eece6..daab878
> > > > 100644
> > > > --- a/fs/cifs/file.c
> > > > +++ b/fs/cifs/file.c
> > > > @@ -2965,7 +2965,6 @@ cifs_uncached_readdata_release(struct kref
> > > *refcount)
> > > > kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
> > > > for (i = 0; i < rdata->nr_pages; i++) {
> > > > put_page(rdata->pages[i]);
> > > > -   rdata->pages[i] = NULL;
> > > > }
> > > > cifs_readdata_release(refcount);  } @@ -3092,6 +3091,63 @@
> > > > cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
> > > > return uncached_fill_pages(server, rdata, iter,
> > > > iter->count); }
> > > >
> > > > +static int cifs_resend_rdata(struct cifs_readdata *rdata,
> > > > + struct list_head *rdata_list,
> > > > + struct cifs_aio_ctx *ctx) {
> > > > +   int wait_retry = 0;
> > > > +   unsigned int rsize, credits;
> > > > +   int rc;
> > > > +   struct TCP_Server_Info *server =
> > > > +tlink_tcon(rdata->cfile->tlink)->ses->server;
> > > > +
> > > > +   /*
> > > > +* Try to resend this rdata, waiting for credits up to 3 
> > > > seconds.
> > > > +* Note: we are attempting to resend the whole rdata not in
> segments
> > > > +*/
> > > > +   do {
> > > > +   rc = server->ops-&

Re: [Patch v4 1/3] CIFS: Add support for direct I/O read

2018-11-29 Thread Pavel Shilovsky
Wed, Nov 28, 2018 at 15:43, Long Li :
>
> > Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> >
> > Hi Long,
> >
> > Please find my comments below.
> >
> >
> > Wed, Oct 31, 2018 at 15:14, Long Li :
> > >
> > > From: Long Li 
> > >
> > > With direct I/O read, we transfer the data directly from transport
> > > layer to the user data buffer.
> > >
> > > Change in v3: add support for kernel AIO
> > >
> > > Change in v4:
> > > Refactor common read code to __cifs_readv for direct and non-direct I/O.
> > > Retry on direct I/O failure.
> > >
> > > Signed-off-by: Long Li 
> > > ---
> > >  fs/cifs/cifsfs.h   |   1 +
> > >  fs/cifs/cifsglob.h |   5 ++
> > >  fs/cifs/file.c | 219 +++--
> > >  3 files changed, 186 insertions(+), 39 deletions(-)
> > >
> > > diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index
> > > 5f02318..7fba9aa 100644
> > > --- a/fs/cifs/cifsfs.h
> > > +++ b/fs/cifs/cifsfs.h
> > > @@ -102,6 +102,7 @@ extern int cifs_open(struct inode *inode, struct
> > > file *file);  extern int cifs_close(struct inode *inode, struct file
> > > *file);  extern int cifs_closedir(struct inode *inode, struct file
> > > *file);  extern ssize_t cifs_user_readv(struct kiocb *iocb, struct
> > > iov_iter *to);
> > > +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter
> > > +*to);
> > >  extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter
> > > *to);  extern ssize_t cifs_user_writev(struct kiocb *iocb, struct
> > > iov_iter *from);  extern ssize_t cifs_strict_writev(struct kiocb
> > > *iocb, struct iov_iter *from); diff --git a/fs/cifs/cifsglob.h
> > > b/fs/cifs/cifsglob.h index 7f62c98..52248dd 100644
> > > --- a/fs/cifs/cifsglob.h
> > > +++ b/fs/cifs/cifsglob.h
> > > @@ -1146,6 +1146,11 @@ struct cifs_aio_ctx {
> > > unsigned intlen;
> > > unsigned inttotal_len;
> > > boolshould_dirty;
> > > +   /*
> > > +* Indicates if this aio_ctx is for direct_io,
> > > +* If yes, iter is a copy of the user passed iov_iter
> > > +*/
> > > +   booldirect_io;
> > >  };
> > >
> > >  struct cifs_readdata;
> > > diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 87eece6..daab878
> > > 100644
> > > --- a/fs/cifs/file.c
> > > +++ b/fs/cifs/file.c
> > > @@ -2965,7 +2965,6 @@ cifs_uncached_readdata_release(struct kref
> > *refcount)
> > > kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
> > > for (i = 0; i < rdata->nr_pages; i++) {
> > > put_page(rdata->pages[i]);
> > > -   rdata->pages[i] = NULL;
> > > }
> > > cifs_readdata_release(refcount);  } @@ -3092,6 +3091,63 @@
> > > cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
> > > return uncached_fill_pages(server, rdata, iter, iter->count);
> > > }
> > >
> > > +static int cifs_resend_rdata(struct cifs_readdata *rdata,
> > > + struct list_head *rdata_list,
> > > + struct cifs_aio_ctx *ctx) {
> > > +   int wait_retry = 0;
> > > +   unsigned int rsize, credits;
> > > +   int rc;
> > > +   struct TCP_Server_Info *server =
> > > +tlink_tcon(rdata->cfile->tlink)->ses->server;
> > > +
> > > +   /*
> > > +* Try to resend this rdata, waiting for credits up to 3 seconds.
> > > +* Note: we are attempting to resend the whole rdata not in 
> > > segments
> > > +*/
> > > +   do {
> > > +   rc = server->ops->wait_mtu_credits(server, rdata->bytes,
> > > +   &rsize, &credits);
> > > +
> > > +   if (rc)
> > > +   break;
> > > +
> > > +   if (rsize < rdata->bytes) {
> > > +   add_credits_and_wake_if(server, credits, 0);
> > > +   msleep(1000);
> > > +   wait_retry++;
> > > +   }
> > > +   } while (rsize < rdata->bytes && wait_retry 

RE: [Patch v4 1/3] CIFS: Add support for direct I/O read

2018-11-28 Thread Long Li
> Subject: Re: [Patch v4 1/3] CIFS: Add support for direct I/O read
> 
> Hi Long,
> 
> Please find my comments below.
> 
> 
> Wed, Oct 31, 2018 at 15:14, Long Li :
> >
> > From: Long Li 
> >
> > With direct I/O read, we transfer the data directly from transport
> > layer to the user data buffer.
> >
> > Change in v3: add support for kernel AIO
> >
> > Change in v4:
> > Refactor common read code to __cifs_readv for direct and non-direct I/O.
> > Retry on direct I/O failure.
> >
> > Signed-off-by: Long Li 
> > ---
> >  fs/cifs/cifsfs.h   |   1 +
> >  fs/cifs/cifsglob.h |   5 ++
> >  fs/cifs/file.c | 219 +++--
> >  3 files changed, 186 insertions(+), 39 deletions(-)
> >
> > diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index
> > 5f02318..7fba9aa 100644
> > --- a/fs/cifs/cifsfs.h
> > +++ b/fs/cifs/cifsfs.h
> > @@ -102,6 +102,7 @@ extern int cifs_open(struct inode *inode, struct
> > file *file);  extern int cifs_close(struct inode *inode, struct file
> > *file);  extern int cifs_closedir(struct inode *inode, struct file
> > *file);  extern ssize_t cifs_user_readv(struct kiocb *iocb, struct
> > iov_iter *to);
> > +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter
> > +*to);
> >  extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter
> > *to);  extern ssize_t cifs_user_writev(struct kiocb *iocb, struct
> > iov_iter *from);  extern ssize_t cifs_strict_writev(struct kiocb
> > *iocb, struct iov_iter *from); diff --git a/fs/cifs/cifsglob.h
> > b/fs/cifs/cifsglob.h index 7f62c98..52248dd 100644
> > --- a/fs/cifs/cifsglob.h
> > +++ b/fs/cifs/cifsglob.h
> > @@ -1146,6 +1146,11 @@ struct cifs_aio_ctx {
> > unsigned intlen;
> > unsigned inttotal_len;
> > boolshould_dirty;
> > +   /*
> > +* Indicates if this aio_ctx is for direct_io,
> > +* If yes, iter is a copy of the user passed iov_iter
> > +*/
> > +   booldirect_io;
> >  };
> >
> >  struct cifs_readdata;
> > diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 87eece6..daab878
> > 100644
> > --- a/fs/cifs/file.c
> > +++ b/fs/cifs/file.c
> > @@ -2965,7 +2965,6 @@ cifs_uncached_readdata_release(struct kref
> *refcount)
> > kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
> > for (i = 0; i < rdata->nr_pages; i++) {
> > put_page(rdata->pages[i]);
> > -   rdata->pages[i] = NULL;
> > }
> > cifs_readdata_release(refcount);  } @@ -3092,6 +3091,63 @@
> > cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
> > return uncached_fill_pages(server, rdata, iter, iter->count);
> > }
> >
> > +static int cifs_resend_rdata(struct cifs_readdata *rdata,
> > + struct list_head *rdata_list,
> > + struct cifs_aio_ctx *ctx) {
> > +   int wait_retry = 0;
> > +   unsigned int rsize, credits;
> > +   int rc;
> > +   struct TCP_Server_Info *server =
> > +tlink_tcon(rdata->cfile->tlink)->ses->server;
> > +
> > +   /*
> > +* Try to resend this rdata, waiting for credits up to 3 seconds.
> > +* Note: we are attempting to resend the whole rdata not in segments
> > +*/
> > +   do {
> > +   rc = server->ops->wait_mtu_credits(server, rdata->bytes,
> > +   &rsize, &credits);
> > +
> > +   if (rc)
> > +   break;
> > +
> > +   if (rsize < rdata->bytes) {
> > +   add_credits_and_wake_if(server, credits, 0);
> > +   msleep(1000);
> > +   wait_retry++;
> > +   }
> > +   } while (rsize < rdata->bytes && wait_retry < 3);
> > +
> > +   /*
> > +* If we can't find enough credits to send this rdata
> > +* release the rdata and return failure, this will pass
> > +* whatever I/O amount we have finished to VFS.
> > +*/
> > +   if (rsize < rdata->bytes) {
> > +   rc = -EBUSY;
> 
> We don't have enough credits and return EBUSY here...
> 
> > +   goto out;
> > +   }
> > +
> > +   rc = -

Re: [Patch v4 1/3] CIFS: Add support for direct I/O read

2018-11-16 Thread Pavel Shilovsky
Hi Long,

Please find my comments below.


Wed, Oct 31, 2018 at 15:14, Long Li :
>
> From: Long Li 
>
> With direct I/O read, we transfer the data directly from transport layer to
> the user data buffer.
>
> Change in v3: add support for kernel AIO
>
> Change in v4:
> Refactor common read code to __cifs_readv for direct and non-direct I/O.
> Retry on direct I/O failure.
>
> Signed-off-by: Long Li 
> ---
>  fs/cifs/cifsfs.h   |   1 +
>  fs/cifs/cifsglob.h |   5 ++
>  fs/cifs/file.c | 219 +++--
>  3 files changed, 186 insertions(+), 39 deletions(-)
>
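A minimal sketch of the refactoring the changelog describes (common read
code moved into __cifs_readv(), shared by the direct and non-direct entry
points). The exact signature is an assumption based on the changelog, not
quoted from the hunks below:

    static ssize_t __cifs_readv(struct kiocb *iocb, struct iov_iter *to,
                                bool direct);

    ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
    {
            /* buffered/uncached path: data lands in per-request pages */
            return __cifs_readv(iocb, to, false);
    }

    ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
    {
            /* direct path: data goes straight into the caller's buffers */
            return __cifs_readv(iocb, to, true);
    }
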
> diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
> index 5f02318..7fba9aa 100644
> --- a/fs/cifs/cifsfs.h
> +++ b/fs/cifs/cifsfs.h
> @@ -102,6 +102,7 @@ extern int cifs_open(struct inode *inode, struct file 
> *file);
>  extern int cifs_close(struct inode *inode, struct file *file);
>  extern int cifs_closedir(struct inode *inode, struct file *file);
>  extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
> +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
>  extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
>  extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
>  extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
> diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
> index 7f62c98..52248dd 100644
> --- a/fs/cifs/cifsglob.h
> +++ b/fs/cifs/cifsglob.h
> @@ -1146,6 +1146,11 @@ struct cifs_aio_ctx {
> unsigned intlen;
> unsigned inttotal_len;
> boolshould_dirty;
> +   /*
> +* Indicates if this aio_ctx is for direct_io,
> +* If yes, iter is a copy of the user passed iov_iter
> +*/
> +   booldirect_io;
>  };
>
>  struct cifs_readdata;
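Given the comment above (for direct I/O, iter is already a copy of the
user-passed iov_iter), the completion side would presumably branch on this
flag instead of copying out of pages. A sketch under that assumption:
ctx->direct_io, ctx->iter and ctx->total_len come from the hunk above,
rdata->got_bytes is the existing cifs_readdata byte counter, and
cifs_copy_pages_to_iter() is a hypothetical helper:

    if (ctx->direct_io) {
            /* data already landed in the user buffers behind ctx->iter */
            ctx->total_len += rdata->got_bytes;
    } else {
            /* uncached path: copy from rdata->pages into ctx->iter */
            ctx->total_len += cifs_copy_pages_to_iter(rdata, &ctx->iter);
    }
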
> diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> index 87eece6..daab878 100644
> --- a/fs/cifs/file.c
> +++ b/fs/cifs/file.c
> @@ -2965,7 +2965,6 @@ cifs_uncached_readdata_release(struct kref *refcount)
> kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
> for (i = 0; i < rdata->nr_pages; i++) {
> put_page(rdata->pages[i]);
> -   rdata->pages[i] = NULL;
> }
> cifs_readdata_release(refcount);
>  }
> @@ -3092,6 +3091,63 @@ cifs_uncached_copy_into_pages(struct TCP_Server_Info 
> *server,
> return uncached_fill_pages(server, rdata, iter, iter->count);
>  }
>
> +static int cifs_resend_rdata(struct cifs_readdata *rdata,
> + struct list_head *rdata_list,
> + struct cifs_aio_ctx *ctx)
> +{
> +   int wait_retry = 0;
> +   unsigned int rsize, credits;
> +   int rc;
> +   struct TCP_Server_Info *server = 
> tlink_tcon(rdata->cfile->tlink)->ses->server;
> +
> +   /*
> +* Try to resend this rdata, waiting for credits up to 3 seconds.
> +* Note: we are attempting to resend the whole rdata not in segments
> +*/
> +   do {
> +   rc = server->ops->wait_mtu_credits(server, rdata->bytes,
> +   &rsize, &credits);
> +
> +   if (rc)
> +   break;
> +
> +   if (rsize < rdata->bytes) {
> +   add_credits_and_wake_if(server, credits, 0);
> +   msleep(1000);
> +   wait_retry++;
> +   }
> +   } while (rsize < rdata->bytes && wait_retry < 3);
> +
> +   /*
> +* If we can't find enough credits to send this rdata
> +* release the rdata and return failure, this will pass
> +* whatever I/O amount we have finished to VFS.
> +*/
> +   if (rsize < rdata->bytes) {
> +   rc = -EBUSY;

We don't have enough credits and return EBUSY here...

> +   goto out;
> +   }
> +
> +   rc = -EAGAIN;
> +   while (rc == -EAGAIN)
> +   if (!rdata->cfile->invalidHandle ||
> +   !(rc = cifs_reopen_file(rdata->cfile, true)))
> +   rc = server->ops->async_readv(rdata);
> +
> +   if (!rc) {
> +   /* Add to aio pending list */
> +   list_add_tail(&rdata->list, rdata_list);
> +   return 0;
> +   }
> +
> +   add_credits_and_wake_if(server, rdata->credits, 0);
> +out:
> +   kref_put(&rdata->refcount,
> +   cifs_uncached_readdata_release);
> +
> +   return rc;
> +}
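As a usage illustration only (not from the patch), a collect loop could
drive cifs_resend_rdata() like this when a completed request comes back
with -EAGAIN, e.g. after a reconnect. The surrounding loop, the use of
ctx->list and the rdata->result check are assumptions about the caller:

    struct cifs_readdata *rdata, *tmp;
    LIST_HEAD(tmp_list);

    /* pull out every completed request that asked to be retried */
    list_for_each_entry_safe(rdata, tmp, &ctx->list, list)
            if (rdata->result == -EAGAIN)
                    list_move_tail(&rdata->list, &tmp_list);

    /* re-issue each one; on failure cifs_resend_rdata() drops its
     * reference and the read returns whatever has already completed */
    list_for_each_entry_safe(rdata, tmp, &tmp_list, list) {
            list_del_init(&rdata->list);
            cifs_resend_rdata(rdata, &ctx->list, ctx);
    }
    /* successfully re-sent requests are back on ctx->list and are
     * waited on again by the caller */
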
> +
>  static int
>  cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo 
> *open_file,
>  struct cifs_sb_info *cifs_sb, struct list_head 
> *rdata_list,
> @@ -3103,6 +3159,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct 
> cifsFileInfo *open_file,
> int rc;
> pid_t pid;
> struct TCP_Server_Info *server;
> +   struct page **pagevec;
> +   size_t start;
> +   struct iov_iter 
