[RFC 3/4] lightnvm: read from rrpc write buffer if possible

From: Javier González
Date: 2016-02-04
Since writes are buffered in memory, incoming reads must retrieve
buffered pages instead of submitting the I/O to the media.

This patch implements that logic. When a read bio arrives at rrpc, valid
pages from flash blocks still residing in memory are copied out of the
write buffer. If there are any "holes" in the bio, a new bio is submitted
to the media to retrieve the missing pages, and the original bio is
updated accordingly.
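
As a rough illustration of the intended flow, a minimal sketch in rrpc
style (rrpc_buf_copy_page() and rrpc_submit_hole_bio() are stand-in names
for exposition, not functions from this patch; request locking and GC I/O
are omitted here but handled by the real functions in the diff below):

	/*
	 * Sketch only: serve a read bio from the write buffer where
	 * possible and collect the remaining "holes" for the media.
	 * Assumes nr_pages <= BITS_PER_LONG.
	 */
	static int rrpc_sketch_buffered_read(struct rrpc *rrpc, struct bio *bio)
	{
		sector_t laddr = rrpc_get_laddr(bio);
		uint8_t nr_pages = rrpc_get_pages(bio);
		unsigned long holes = 0; /* bit i set: page i must hit the media */
		int i, nr_holes = 0;

		for (i = 0; i < nr_pages; i++) {
			struct rrpc_addr *gp = &rrpc->trans_map[laddr + i];

			/* Mapped and still held in an in-memory flash block:
			 * copy the page straight into the original bio.
			 */
			if (gp->rblk && rrpc_buf_copy_page(bio, i, gp))
				continue;

			set_bit(i, &holes);
			nr_holes++;
		}

		if (!nr_holes)
			return NVM_IO_DONE;	/* fully served from the buffer */

		/*
		 * Build a second bio covering only the holes, submit it to
		 * the media, and copy the data back into the original bio
		 * on completion.
		 */
		return rrpc_submit_hole_bio(rrpc, bio, &holes, nr_holes);
	}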

Signed-off-by: Javier González 
---
 drivers/lightnvm/rrpc.c  | 451 ---
 include/linux/lightnvm.h |   1 +
 2 files changed, 346 insertions(+), 106 deletions(-)

diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index e9fb19d..6348d52 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -827,10 +827,13 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
 	uint8_t nr_pages = rqd->nr_pages;
 
-	if (bio_data_dir(rqd->bio) == WRITE)
+	if (bio_data_dir(rqd->bio) == WRITE) {
 		rrpc_end_io_write(rrpc, rqd, nr_pages);
-	else
+	} else {
+		if (rqd->flags & NVM_IOTYPE_SYNC)
+			return;
 		rrpc_end_io_read(rrpc, rqd, nr_pages);
+	}
 
 	bio_put(rqd->bio);
 
@@ -842,83 +845,6 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 	mempool_free(rqd, rrpc->rq_pool);
 }
 
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
-			struct nvm_rq *rqd, struct rrpc_buf_rq *brrqd,
-			unsigned long flags, int nr_pages)
-{
-	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rrqd);
-	struct rrpc_addr *gp;
-	sector_t laddr = rrpc_get_laddr(bio);
-	int is_gc = flags & NVM_IOTYPE_GC;
-	int i;
-
-	if (!is_gc && rrpc_lock_rq(rrpc, bio, rrqd)) {
-		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
-		mempool_free(rrqd, rrpc->rrq_pool);
-		mempool_free(rqd, rrpc->rq_pool);
-		return NVM_IO_REQUEUE;
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		/* We assume that mapping occurs at 4KB granularity */
-		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
-		gp = &rrpc->trans_map[laddr + i];
-
-		if (gp->rblk) {
-			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
-								gp->addr);
-		} else {
-			BUG_ON(is_gc);
-			rrpc_unlock_laddr(rrpc, r);
-			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
-							rqd->dma_ppa_list);
-			mempool_free(rrqd, rrpc->rrq_pool);
-			mempool_free(rqd, rrpc->rq_pool);
-			return NVM_IO_DONE;
-		}
-
-		brrqd[i].addr = gp;
-	}
-
-	rqd->opcode = NVM_OP_HBREAD;
-
-	return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
-			unsigned long flags)
-{
-	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-	int is_gc = flags & NVM_IOTYPE_GC;
-	sector_t laddr = rrpc_get_laddr(bio);
-	struct rrpc_addr *gp;
-
-	if (!is_gc && rrpc_lock_rq(rrpc, bio, rrqd)) {
-		mempool_free(rrqd, rrpc->rrq_pool);
-		mempool_free(rqd, rrpc->rq_pool);
-		return NVM_IO_REQUEUE;
-	}
-
-	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
-	gp = &rrpc->trans_map[laddr];
-
-	if (gp->rblk) {
-		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
-	} else {
-		BUG_ON(is_gc);
-		rrpc_unlock_rq(rrpc, rrqd);
-		mempool_free(rrqd, rrpc->rrq_pool);
-		mempool_free(rqd, rrpc->rq_pool);
-		return NVM_IO_DONE;
-	}
-
-	rqd->opcode = NVM_OP_HBREAD;
-	rrqd->addr = gp;
-
-	return NVM_IO_OK;
-}
-
 /*
  * Copy data from current bio to block write buffer. This is necessary
  * to guarantee durability if a flash block becomes bad before all pages
@@ -1051,14 +977,335 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
 	return NVM_IO_DONE;
 }
 
+static int rrpc_buffer_write(struct rrpc *rrpc, struct bio *bio,
+				struct rrpc_rq *rrqd, unsigned long flags)
+{
+	uint8_t nr_pages = rrpc_get_pages(bio);
+
+	rrqd->nr_pages = nr_pages;
+
+	if (nr_pages > 1)
+		return rrpc_write_ppalist_rq(rrpc, bio, rrqd, flags, nr_pages);
+	else
+		return rrpc_write_rq(rrpc, bio, rrqd, flags);
+}
+
+static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
+				struct nvm_rq *rqd, struct 

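A note on the rrpc_end_io() hunk above: a request flagged NVM_IOTYPE_SYNC
returns from the completion handler before any cleanup, so the submitter
keeps ownership of the bio and the nvm_rq. A minimal sketch of such a
caller, assuming nvm_submit_io() from the lightnvm core and leaving the
wait mechanism unspecified:

	/*
	 * Sketch only: pair a synchronous read with the NVM_IOTYPE_SYNC
	 * early return in rrpc_end_io().  The wait step is a placeholder;
	 * a multi-page request would also free its ppa_list here.
	 */
	static int rrpc_sketch_sync_read(struct rrpc *rrpc, struct nvm_rq *rqd)
	{
		rqd->flags |= NVM_IOTYPE_SYNC;

		if (nvm_submit_io(rrpc->dev, rqd))
			return NVM_IO_ERR;

		/* ... wait for the device to complete the request ... */

		/* rrpc_end_io() skipped cleanup, so do it here instead. */
		bio_put(rqd->bio);
		mempool_free(rqd, rrpc->rq_pool);
		return NVM_IO_OK;
	}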