After reducing the search range when building the aux tree, the prev array
is no longer used, so remove it.

Signed-off-by: Alan Huang <mmpgour...@gmail.com>
---
 fs/bcachefs/bset.c | 31 +++----------------------------
 1 file changed, 3 insertions(+), 28 deletions(-)

diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 43b40f6b8c0a..f2591fa59fa1 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -304,11 +304,6 @@ struct bkey_float {
 };
 #define BKEY_MANTISSA_BITS     16
 
-static unsigned bkey_float_byte_offset(unsigned idx)
-{
-       return idx * sizeof(struct bkey_float);
-}
-
 struct ro_aux_tree {
        u8                      nothing[0];
        struct bkey_float       f[];
@@ -360,14 +355,6 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
        return __aux_tree_base(b, t);
 }
 
-static u8 *ro_aux_tree_prev(const struct btree *b,
-                           const struct bset_tree *t)
-{
-       EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
-       return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
-}
-
 static struct bkey_float *bkey_float(const struct btree *b,
                                     const struct bset_tree *t,
                                     unsigned idx)
@@ -479,15 +466,6 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
                        bkey_float(b, t, j)->key_offset);
 }
 
-static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
-                                            const struct bset_tree *t,
-                                            unsigned j)
-{
-       unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
-
-       return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
-}
-
 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
                                       const struct bset_tree *t)
 {
@@ -689,8 +667,7 @@ static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
 
 static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
 {
-       return __bset_tree_capacity(b, t) /
-               (sizeof(struct bkey_float) + sizeof(u8));
+       return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
 }
 
 static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
@@ -719,7 +696,7 @@ static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
 
 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 {
-       struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+       struct bkey_packed *k = btree_bkey_first(b, t);
        struct bkey_i min_key, max_key;
        unsigned cacheline = 1;
 
@@ -737,7 +714,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
        /* First we figure out where the first key in each cacheline is */
        eytzinger1_for_each(j, t->size - 1) {
                while (bkey_to_cacheline(b, t, k) < cacheline)
-                       prev = k, k = bkey_p_next(k);
+                       k = bkey_p_next(k);
 
                if (k >= btree_bkey_last(b, t)) {
                        /* XXX: this path sucks */
@@ -745,11 +722,9 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
                        goto retry;
                }
 
-               ro_aux_tree_prev(b, t)[j] = prev->u64s;
                bkey_float(b, t, j)->key_offset =
                        bkey_to_cacheline_offset(b, t, cacheline++, k);
 
-               EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
                EBUG_ON(tree_to_bkey(b, t, j) != k);
        }
 
-- 
2.45.2


Reply via email to