This reverts commit bd7d4ef6a4c9b3611fa487a0065bf042c71ce620.
---
 block/bfq-iosched.c | 15 ++++++++-------
 block/bfq-iosched.h |  6 ++++++
 2 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index cd307767a134..8cc3032b66de 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5303,25 +5303,26 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
                                      struct sbitmap_queue *bt)
 {
        unsigned int i, j, min_shallow = UINT_MAX;
+       bfqd->sb_shift = bt->sb.shift;
 
        /*
         * In-word depths if no bfq_queue is being weight-raised:
         * leaving 25% of tags only for sync reads.
         *
         * In next formulas, right-shift the value
-        * (1U<<bt->sb.shift), instead of computing directly
-        * (1U<<(bt->sb.shift - something)), to be robust against
-        * any possible value of bt->sb.shift, without having to
+        * (1U<<bfqd->sb_shift), instead of computing directly
+        * (1U<<(bfqd->sb_shift - something)), to be robust against
+        * any possible value of bfqd->sb_shift, without having to
         * limit 'something'.
         */
        /* no more than 50% of tags for async I/O */
-       bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+       bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
        /*
         * no more than 75% of tags for sync writes (25% extra tags
         * w.r.t. async I/O, to prevent async I/O from starving sync
         * writes)
         */
-       bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+       bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
 
        /*
         * In-word depths in case some bfq_queue is being weight-
@@ -5331,9 +5332,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * shortage.
         */
        /* no more than ~18% of tags for async I/O */
-       bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+       bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
        /* no more than ~37% of tags for sync writes (~20% extra tags) */
-       bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+       bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
 
        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 0b02bf302de0..4de5dc349a1e 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -697,6 +697,12 @@ struct bfq_data {
        /* bfqq associated with the task issuing current bio for merging */
        struct bfq_queue *bio_bfqq;
 
+       /*
+        * Cached sbitmap shift, used to compute depth limits in
+        * bfq_update_depths.
+        */
+       unsigned int sb_shift;
+
        /*
         * Depth limits used in bfq_limit_depth (see comments on the
         * function)
-- 
2.20.1
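
For reference only (not part of the patch): a minimal userspace sketch of the
word_depths arithmetic that the reverted hunk computes from the cached
sb_shift. The value sb_shift = 6 is an assumed example, corresponding to a
64-bit sbitmap word; the MAX_U macro and main() wrapper are illustrative only.

	#include <stdio.h>

	#define MAX_U(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		unsigned int sb_shift = 6;	/* assumed example value */
		unsigned int word_depths[2][2];

		/* no bfq_queue weight-raised: 50% async, 75% sync writes */
		word_depths[0][0] = MAX_U((1U << sb_shift) >> 1, 1U);
		word_depths[0][1] = MAX_U(((1U << sb_shift) * 3) >> 2, 1U);

		/* some bfq_queue weight-raised: ~18% async, ~37% sync writes */
		word_depths[1][0] = MAX_U(((1U << sb_shift) * 3) >> 4, 1U);
		word_depths[1][1] = MAX_U(((1U << sb_shift) * 6) >> 4, 1U);

		printf("no-wr:  async %u, sync writes %u\n",
		       word_depths[0][0], word_depths[0][1]);
		printf("wr:     async %u, sync writes %u\n",
		       word_depths[1][0], word_depths[1][1]);
		return 0;
	}

With sb_shift = 6 (64 tags per word) this prints 32, 48, 12 and 24, i.e. the
50%, 75%, ~18% and ~37% limits described in the in-line comments above.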
