On 18.12.2017 15:22, Ming Lei wrote:
> When merging one bvec into a segment, if the bvec is too big
> to merge, the current policy is to move the whole bvec into a
> new segment.
> 
> This patchset changes the policy to try to maximize the size of
> front segments: in the above situation, part of the bvec is
> merged into the current segment, and the remainder is put into
> the next segment.
> 
> This patch prepares for multipage bvec support, because this
> case can become quite common then, and we should try to make
> front segments full size.
> 
> Signed-off-by: Ming Lei <ming....@redhat.com>
> ---
>  block/blk-merge.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 49 insertions(+), 5 deletions(-)
> 
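The arithmetic behind this policy is easy to demonstrate stand-alone. Below
is a minimal user-space sketch of the idea described in the changelog; the
names MAX_SEG and split_front() are hypothetical stand-ins for
queue_max_segment_size() and the in-loop logic, not part of the patch:

  #include <stdio.h>

  #define MAX_SEG 65536u  /* assumed queue_max_segment_size() */

  /*
   * Given the current segment size and an incoming bvec length,
   * return how many bytes of the bvec get merged into the current
   * (front) segment; the remainder starts a new segment.
   */
  static unsigned split_front(unsigned seg_size, unsigned bv_len)
  {
          if (seg_size + bv_len <= MAX_SEG)
                  return bv_len;             /* whole bvec fits */
          return MAX_SEG - seg_size;         /* fill up the front segment */
  }

  int main(void)
  {
          /* 61440 + 8192 > 65536: 4096 bytes merge, 4096 spill over */
          printf("%u\n", split_front(61440, 8192));
          return 0;
  }
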
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index a476337a8ff4..42ceb89bc566 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -109,6 +109,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
>       bool do_split = true;
>       struct bio *new = NULL;
>       const unsigned max_sectors = get_max_io_size(q, bio);
> +     unsigned advance = 0;
>  
>       bio_for_each_segment(bv, bio, iter) {
>               /*
> @@ -134,12 +135,32 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
>               }
>  
>               if (bvprvp && blk_queue_cluster(q)) {
> -                     if (seg_size + bv.bv_len > queue_max_segment_size(q))
> -                             goto new_segment;
>                       if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
>                               goto new_segment;
>                       if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
>                               goto new_segment;
> +                     if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
> +                             /*
> +                              * One assumption is that the initial value
> +                              * of @seg_size (equal to bv.bv_len) won't be
> +                              * bigger than the max segment size, but this
> +                              * becomes false once multipage bvecs arrive.
> +                              */
> +                             advance = queue_max_segment_size(q) - seg_size;
> +
> +                             if (advance > 0) {
> +                                     seg_size += advance;
> +                                     sectors += advance >> 9;
> +                                     bv.bv_len -= advance;
> +                                     bv.bv_offset += advance;
> +                             }
> +
> +                             /*
> +                              * Still need to put remainder of current
> +                              * bvec into a new segment.
> +                              */
> +                             goto new_segment;
> +                     }
>  
>                       seg_size += bv.bv_len;
>                       bvprv = bv;
> @@ -161,6 +182,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
>               seg_size = bv.bv_len;
>               sectors += bv.bv_len >> 9;
>  
> +             /* restore the bvec for the iterator */
> +             if (advance) {
> +                     bv.bv_len += advance;
> +                     bv.bv_offset -= advance;
> +                     advance = 0;
> +             }
>       }
>  
>       do_split = false;
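To follow the advance/restore dance above with concrete numbers: with
queue_max_segment_size() = 65536, seg_size = 61440 and an incoming bvec of
8192 bytes, advance = 4096 bytes are accounted to the current segment, the
remaining 4096 bytes open the new segment, and the bvec is put back
afterwards so later bookkeeping sees its original length and offset. A
stand-alone sketch of that bookkeeping (illustrative struct and values, not
kernel code):

  #include <assert.h>

  struct bvec {
          unsigned len;
          unsigned off;
  };

  int main(void)
  {
          struct bvec bv = { .len = 8192, .off = 0 };
          unsigned seg_size = 61440, max_seg = 65536, sectors = 0;
          unsigned advance = max_seg - seg_size;    /* 4096 */

          /* account the front part to the current segment ... */
          seg_size += advance;
          sectors += advance >> 9;
          bv.len -= advance;
          bv.off += advance;

          /* ... start a new segment with the remainder ... */
          seg_size = bv.len;
          sectors += bv.len >> 9;

          /* ... then restore the bvec, as the hunk above does */
          bv.len += advance;
          bv.off -= advance;

          assert(seg_size == 4096 && sectors == 16);
          assert(bv.len == 8192 && bv.off == 0);
          return 0;
  }
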
> @@ -361,16 +388,29 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
>  {
>  
>       int nbytes = bvec->bv_len;
> +     unsigned advance = 0;
>  
>       if (*sg && *cluster) {
> -             if ((*sg)->length + nbytes > queue_max_segment_size(q))
> -                     goto new_segment;
> -
>               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
>                       goto new_segment;
>               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
>                       goto new_segment;
>  
> +             /*
> +              * Try our best to merge part of the bvec into the
> +              * previous segment, following the same policy as
> +              * blk_bio_segment_split().
> +              */
> +             if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
> +                     advance = queue_max_segment_size(q) - (*sg)->length;
> +                     if (advance) {
> +                             (*sg)->length += advance;
> +                             bvec->bv_offset += advance;
> +                             bvec->bv_len -= advance;
> +                     }
> +                     goto new_segment;
> +             }
> +
>               (*sg)->length += nbytes;
>       } else {
>  new_segment:
> @@ -393,6 +433,10 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
>  
>               sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
>               (*nsegs)++;
> +
> +             /* restore the bvec so the caller's iterator stays consistent */
> +             bvec->bv_offset -= advance;
> +             bvec->bv_len += advance;
>       }
>       *bvprv = *bvec;
>  }
> 
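For reference, the sg-mapping hunks follow the same fill-the-front policy:
the previous sg entry is grown by 'advance' bytes, the remainder of the bvec
is mapped into a new sg entry, and the bvec fields are restored afterwards.
A compressed stand-alone sketch of the intent (illustrative struct, not the
kernel scatterlist API):

  #include <stdio.h>

  struct fake_sg { unsigned length; };

  int main(void)
  {
          struct fake_sg prev = { .length = 61440 };
          unsigned bv_len = 8192, max_seg = 65536;

          if (prev.length + bv_len > max_seg) {
                  unsigned advance = max_seg - prev.length;

                  prev.length += advance;  /* fill the previous sg entry */
                  bv_len -= advance;       /* remainder goes to a new entry */
          }
          printf("prev=%u new=%u\n", prev.length, bv_len);  /* 65536, 4096 */
          return 0;
  }
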

Hello,

This patch breaks MMC on next-20180108; in particular, MMC no longer works
with this patch on NVIDIA Tegra20:

<3>[   36.622253] print_req_error: I/O error, dev mmcblk1, sector 512
<3>[   36.671233] print_req_error: I/O error, dev mmcblk2, sector 128
<3>[   36.711308] print_req_error: I/O error, dev mmcblk1, sector 31325304
<3>[   36.749232] print_req_error: I/O error, dev mmcblk2, sector 512
<3>[   36.761235] print_req_error: I/O error, dev mmcblk1, sector 31325816
<3>[   36.832039] print_req_error: I/O error, dev mmcblk2, sector 31259768
<3>[   99.793248] print_req_error: I/O error, dev mmcblk1, sector 31323136
<3>[   99.982043] print_req_error: I/O error, dev mmcblk1, sector 929792
<3>[   99.986301] print_req_error: I/O error, dev mmcblk1, sector 930816
<3>[  100.293624] print_req_error: I/O error, dev mmcblk1, sector 932864
<3>[  100.466839] print_req_error: I/O error, dev mmcblk1, sector 947200
<3>[  100.642955] print_req_error: I/O error, dev mmcblk1, sector 949248
<3>[  100.818838] print_req_error: I/O error, dev mmcblk1, sector 230400

Any attempt to mount an MMC block device ends in a kernel crash. Reverting
this patch fixes the issue.

-- 
Dmitry
