On Thu, Nov 15, 2018 at 04:52:51PM +0800, Ming Lei wrote:
> It is more efficient to use bio_for_each_bvec() to map sg, meantime
> we have to consider splitting multipage bvec as done in 
> blk_bio_segment_split().
> 
> Cc: Dave Chinner <dchin...@redhat.com>
> Cc: Kent Overstreet <kent.overstr...@gmail.com>
> Cc: Mike Snitzer <snit...@redhat.com>
> Cc: dm-de...@redhat.com
> Cc: Alexander Viro <v...@zeniv.linux.org.uk>
> Cc: linux-fsde...@vger.kernel.org
> Cc: Shaohua Li <s...@kernel.org>
> Cc: linux-r...@vger.kernel.org
> Cc: linux-er...@lists.ozlabs.org
> Cc: David Sterba <dste...@suse.com>
> Cc: linux-bt...@vger.kernel.org
> Cc: Darrick J. Wong <darrick.w...@oracle.com>
> Cc: linux-...@vger.kernel.org
> Cc: Gao Xiang <gaoxian...@huawei.com>
> Cc: Christoph Hellwig <h...@lst.de>
> Cc: Theodore Ts'o <ty...@mit.edu>
> Cc: linux-e...@vger.kernel.org
> Cc: Coly Li <col...@suse.de>
> Cc: linux-bca...@vger.kernel.org
> Cc: Boaz Harrosh <o...@electrozaur.com>
> Cc: Bob Peterson <rpete...@redhat.com>
> Cc: cluster-devel@redhat.com

Reviewed-by: Omar Sandoval <osan...@fb.com>

> Signed-off-by: Ming Lei <ming....@redhat.com>
> ---
>  block/blk-merge.c | 72 +++++++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 52 insertions(+), 20 deletions(-)
> 
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index 6f7deb94a23f..cb9f49bcfd36 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -473,6 +473,56 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
>       return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
>  }
>  
> +static struct scatterlist *blk_next_sg(struct scatterlist **sg,
> +             struct scatterlist *sglist)
> +{
> +     if (!*sg)
> +             return sglist;
> +     else {
> +             /*
> +              * If the driver previously mapped a shorter
> +              * list, we could see a termination bit
> +              * prematurely unless it fully inits the sg
> +              * table on each mapping. We KNOW that there
> +              * must be more entries here or the driver
> +              * would be buggy, so force clear the
> +              * termination bit to avoid doing a full
> +              * sg_init_table() in drivers for each command.
> +              */
> +             sg_unmark_end(*sg);
> +             return sg_next(*sg);
> +     }
> +}
> +
> +static unsigned blk_bvec_map_sg(struct request_queue *q,
> +             struct bio_vec *bvec, struct scatterlist *sglist,
> +             struct scatterlist **sg)
> +{
> +     unsigned nbytes = bvec->bv_len;
> +     unsigned nsegs = 0, total = 0;
> +
> +     while (nbytes > 0) {
> +             unsigned seg_size;
> +             struct page *pg;
> +             unsigned offset, idx;
> +
> +             *sg = blk_next_sg(sg, sglist);
> +
> +             seg_size = min(nbytes, queue_max_segment_size(q));
> +             offset = (total + bvec->bv_offset) % PAGE_SIZE;
> +             idx = (total + bvec->bv_offset) / PAGE_SIZE;
> +             pg = nth_page(bvec->bv_page, idx);
> +
> +             sg_set_page(*sg, pg, seg_size, offset);
> +
> +             total += seg_size;
> +             nbytes -= seg_size;
> +             nsegs++;
> +     }
> +
> +     return nsegs;
> +}
> +
>  static inline void
>  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
>                    struct scatterlist *sglist, struct bio_vec *bvprv,
> @@ -490,25 +540,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
>               (*sg)->length += nbytes;
>       } else {
>  new_segment:
> -             if (!*sg)
> -                     *sg = sglist;
> -             else {
> -                     /*
> -                      * If the driver previously mapped a shorter
> -                      * list, we could see a termination bit
> -                      * prematurely unless it fully inits the sg
> -                      * table on each mapping. We KNOW that there
> -                      * must be more entries here or the driver
> -                      * would be buggy, so force clear the
> -                      * termination bit to avoid doing a full
> -                      * sg_init_table() in drivers for each command.
> -                      */
> -                     sg_unmark_end(*sg);
> -                     *sg = sg_next(*sg);
> -             }
> -
> -             sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
> -             (*nsegs)++;
> +             (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
>       }
>       *bvprv = *bvec;
>  }
> @@ -530,7 +562,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
>       int cluster = blk_queue_cluster(q), nsegs = 0;
>  
>       for_each_bio(bio)
> -             bio_for_each_segment(bvec, bio, iter)
> +             bio_for_each_bvec(bvec, bio, iter)
>                       __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
>                                            &nsegs, &cluster);
>  
> -- 
> 2.9.5
> 

Reply via email to