In pblk, buffers forming bios can be allocated on physically contiguous
or virtually contiguous memory. For physically contiguous memory, we
already use the bio_map_kern helper function; however, for virtually
contiguous memory, we form the bio manually. This makes the code more
complex, especially on the completion path, where mapped pages need to be
freed.
Instead, use bio_copy_kern, which does the same and at the same time
simplifies the completion path.
Signed-off-by: Javier González
Signed-off-by: Matias Bjørling
---
drivers/lightnvm/pblk-core.c | 39 ---
drivers/lightnvm/pblk-read.c | 3 +--
drivers/lightnvm/pblk-recovery.c | 3 +--
drivers/lightnvm/pblk-write.c | 7 +--
drivers/lightnvm/pblk.h | 2 +-
5 files changed, 8 insertions(+), 46 deletions(-)
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index f6b9afbe8589..e69e8829b093 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -423,42 +423,14 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
unsigned int nr_secs, unsigned int len,
- int alloc_type, gfp_t gfp_mask)
+ int alloc_type, gfp_t gfp_mask, int reading)
{
struct nvm_tgt_dev *dev = pblk->dev;
- void *kaddr = data;
- struct page *page;
- struct bio *bio;
- int i, ret;
if (alloc_type == PBLK_KMALLOC_META)
- return bio_map_kern(dev->q, kaddr, len, gfp_mask);
+ return bio_map_kern(dev->q, data, len, gfp_mask);
- bio = bio_kmalloc(gfp_mask, nr_secs);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_secs; i++) {
- page = vmalloc_to_page(kaddr);
- if (!page) {
- pr_err("pblk: could not map vmalloc bio\n");
- bio_put(bio);
- bio = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
- pr_err("pblk: could not add page to bio\n");
- bio_put(bio);
- bio = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- kaddr += PAGE_SIZE;
- }
-out:
- return bio;
+ return bio_copy_kern(dev->q, data, len, GFP_KERNEL, reading);
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
@@ -588,7 +560,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk,
struct pblk_line *line,
rq_len = rq_ppas * geo->sec_size;
bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ l_mg->emeta_alloc_type, GFP_KERNEL, dir == READ);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
goto free_rqd_dma;
@@ -673,9 +645,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk,
struct pblk_line *line,
atomic_dec(&pblk->inflight_io);
reinit_completion(&wait);
- if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
- bio_put(bio);
-
if (rqd.error) {
if (dir == WRITE)
pblk_log_write_err(pblk, &rqd);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index f32091503547..1be972521dcd 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -542,7 +542,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct
pblk_gc_rq *gc_rq)
data_len = (gc_rq->secs_to_gc) * geo->sec_size;
bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
- PBLK_VMALLOC_META, GFP_KERNEL);
+ PBLK_VMALLOC_META, GFP_KERNEL, 1);
if (IS_ERR(bio)) {
pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
goto err_free_dma;
@@ -583,7 +583,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct
pblk_gc_rq *gc_rq)
atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif
- bio_put(bio);
out:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
return ret;
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 279638309d9a..b033d4f2b446 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -342,7 +342,6 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
- bio_put(rqd->bio);
nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
pblk_free_rqd(pblk,