diff options
author | Kent Overstreet <kent.overstreet@linux.dev> | 2022-11-24 03:12:22 -0500 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:09:47 -0400 |
commit | e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef (patch) | |
tree | 3b8608b0ae6e06d405bf6ef63e098416c68830db | |
parent | e15382125948523cd5c887c5fe4fa4303e9a9dc1 (diff) | |
download | linux-e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef.tar.gz linux-e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef.tar.bz2 linux-e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef.zip |
bcachefs: New bpos_cmp(), bkey_cmp() replacements
This patch introduces
- bpos_eq()
- bpos_lt()
- bpos_le()
- bpos_gt()
- bpos_ge()
and equivalent replacements for bkey_cmp().
Looking at the generated assembly, these could probably be improved
further, but we already see a significant code size improvement with
this patch.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
31 files changed, 233 insertions, 178 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index ffcfb9f1916e..a0b9fa30260a 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -982,7 +982,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, goto out; } - if (bkey_cmp(*discard_pos_done, iter.pos) && + if (!bkey_eq(*discard_pos_done, iter.pos) && ca->mi.discard && !c->opts.nochanges) { /* * This works without any other locks because this is the only diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 9a4a62211755..dd47eeb1efc5 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -399,7 +399,7 @@ again: BTREE_ITER_SLOTS, k, ret) { struct bch_alloc_v4 a; - if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0) + if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) break; if (ca->new_fs_bucket_idx && diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h index df8189476016..dc2b91bc67f3 100644 --- a/fs/bcachefs/bkey.h +++ b/fs/bcachefs/bkey.h @@ -144,6 +144,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b, return bkey_cmp_left_packed(b, l, &r); } +static __always_inline bool bpos_eq(struct bpos l, struct bpos r) +{ + return !((l.inode ^ r.inode) | + (l.offset ^ r.offset) | + (l.snapshot ^ r.snapshot)); +} + +static __always_inline bool bpos_lt(struct bpos l, struct bpos r) +{ + return l.inode != r.inode ? l.inode < r.inode : + l.offset != r.offset ? l.offset < r.offset : + l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false; +} + +static __always_inline bool bpos_le(struct bpos l, struct bpos r) +{ + return l.inode != r.inode ? l.inode < r.inode : + l.offset != r.offset ? l.offset < r.offset : + l.snapshot != r.snapshot ? 
l.snapshot < r.snapshot : true; +} + +static __always_inline bool bpos_gt(struct bpos l, struct bpos r) +{ + return bpos_lt(r, l); +} + +static __always_inline bool bpos_ge(struct bpos l, struct bpos r) +{ + return bpos_le(r, l); +} + static __always_inline int bpos_cmp(struct bpos l, struct bpos r) { return cmp_int(l.inode, r.inode) ?: @@ -151,6 +182,36 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r) cmp_int(l.snapshot, r.snapshot); } +static __always_inline bool bkey_eq(struct bpos l, struct bpos r) +{ + return !((l.inode ^ r.inode) | + (l.offset ^ r.offset)); +} + +static __always_inline bool bkey_lt(struct bpos l, struct bpos r) +{ + return l.inode != r.inode + ? l.inode < r.inode + : l.offset < r.offset; +} + +static __always_inline bool bkey_le(struct bpos l, struct bpos r) +{ + return l.inode != r.inode + ? l.inode < r.inode + : l.offset <= r.offset; +} + +static __always_inline bool bkey_gt(struct bpos l, struct bpos r) +{ + return bkey_lt(r, l); +} + +static __always_inline bool bkey_ge(struct bpos l, struct bpos r) +{ + return bkey_le(r, l); +} + static __always_inline int bkey_cmp(struct bpos l, struct bpos r) { return cmp_int(l.inode, r.inode) ?: @@ -159,12 +220,12 @@ static __always_inline int bkey_cmp(struct bpos l, struct bpos r) static inline struct bpos bpos_min(struct bpos l, struct bpos r) { - return bpos_cmp(l, r) < 0 ? l : r; + return bpos_lt(l, r) ? l : r; } static inline struct bpos bpos_max(struct bpos l, struct bpos r) { - return bpos_cmp(l, r) > 0 ? l : r; + return bpos_gt(l, r) ? 
l : r; } void bch2_bpos_swab(struct bpos *); diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c index 141754db5fa1..7fcd6ca40b93 100644 --- a/fs/bcachefs/bkey_methods.c +++ b/fs/bcachefs/bkey_methods.c @@ -245,7 +245,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, } if (type != BKEY_TYPE_btree && - !bkey_cmp(k.k->p, POS_MAX)) { + bkey_eq(k.k->p, POS_MAX)) { prt_printf(err, "key at POS_MAX"); return -EINVAL; } @@ -264,12 +264,12 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k, struct printbuf *err) { - if (bpos_cmp(k.k->p, b->data->min_key) < 0) { + if (bpos_lt(k.k->p, b->data->min_key)) { prt_printf(err, "key before start of btree node"); return -EINVAL; } - if (bpos_cmp(k.k->p, b->data->max_key) > 0) { + if (bpos_gt(k.k->p, b->data->max_key)) { prt_printf(err, "key past end of btree node"); return -EINVAL; } @@ -279,11 +279,11 @@ int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k, void bch2_bpos_to_text(struct printbuf *out, struct bpos pos) { - if (!bpos_cmp(pos, POS_MIN)) + if (bpos_eq(pos, POS_MIN)) prt_printf(out, "POS_MIN"); - else if (!bpos_cmp(pos, POS_MAX)) + else if (bpos_eq(pos, POS_MAX)) prt_printf(out, "POS_MAX"); - else if (!bpos_cmp(pos, SPOS_MAX)) + else if (bpos_eq(pos, SPOS_MAX)) prt_printf(out, "SPOS_MAX"); else { if (pos.inode == U64_MAX) diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h index 0c74ba335e64..7c907b7fd0d7 100644 --- a/fs/bcachefs/bkey_methods.h +++ b/fs/bcachefs/bkey_methods.h @@ -60,7 +60,7 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b { return l->type == r->type && !bversion_cmp(l->version, r->version) && - !bpos_cmp(l->p, bkey_start_pos(r)); + bpos_eq(l->p, bkey_start_pos(r)); } bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c index aa8508efca00..50a1c9d8ebab 100644 --- 
a/fs/bcachefs/bset.c +++ b/fs/bcachefs/bset.c @@ -83,13 +83,12 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, n = bkey_unpack_key(b, _n); - if (bpos_cmp(n.p, k.k->p) < 0) { + if (bpos_lt(n.p, k.k->p)) { printk(KERN_ERR "Key skipped backwards\n"); continue; } - if (!bkey_deleted(k.k) && - !bpos_cmp(n.p, k.k->p)) + if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p)) printk(KERN_ERR "Duplicate keys\n"); } @@ -530,7 +529,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b, goto start; while (1) { if (rw_aux_to_bkey(b, t, j) == k) { - BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k, + BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k, bkey_unpack_pos(b, k))); start: if (++j == t->size) @@ -1065,7 +1064,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b, while (l + 1 != r) { unsigned m = (l + r) >> 1; - if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0) + if (bpos_lt(rw_aux_tree(b, t)[m].k, *search)) l = m; else r = m; @@ -1318,8 +1317,8 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter, struct bkey_packed *k[MAX_BSETS]; unsigned i; - EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0); - EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0); + EBUG_ON(bpos_lt(*search, b->data->min_key)); + EBUG_ON(bpos_gt(*search, b->data->max_key)); bset_aux_tree_verify(b); memset(iter, 0, sizeof(*iter)); diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 90be4c7325f7..0ac8636edba2 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -793,9 +793,9 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b) { if (b->c.btree_id != BTREE_NODE_ID(b->data) || b->c.level != BTREE_NODE_LEVEL(b->data) || - bpos_cmp(b->data->max_key, b->key.k.p) || + !bpos_eq(b->data->max_key, b->key.k.p) || (b->key.k.type == KEY_TYPE_btree_ptr_v2 && - bpos_cmp(b->data->min_key, + !bpos_eq(b->data->min_key, bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) btree_bad_header(c, b); } diff --git a/fs/bcachefs/btree_gc.c 
b/fs/bcachefs/btree_gc.c index 3395fa56c724..f5b46f382340 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -76,7 +76,7 @@ static int bch2_gc_check_topology(struct bch_fs *c, if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) { struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k); - if (bpos_cmp(expected_start, bp->v.min_key)) { + if (!bpos_eq(expected_start, bp->v.min_key)) { bch2_topology_error(c); if (bkey_deleted(&prev->k->k)) { @@ -106,7 +106,7 @@ static int bch2_gc_check_topology(struct bch_fs *c, } } - if (is_last && bpos_cmp(cur.k->k.p, node_end)) { + if (is_last && !bpos_eq(cur.k->k.p, node_end)) { bch2_topology_error(c); printbuf_reset(&buf1); @@ -274,12 +274,12 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key)); if (prev && - bpos_cmp(expected_start, cur->data->min_key) > 0 && + bpos_gt(expected_start, cur->data->min_key) && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev: */ - if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key, - cur->data->min_key) >= 0, c, + if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key, + cur->data->min_key), c, "btree node overwritten by next node at btree %s level %u:\n" " node %s\n" " next %s", @@ -289,7 +289,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, goto out; } - if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p, + if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p, bpos_predecessor(cur->data->min_key)), c, "btree node with incorrect max_key at btree %s level %u:\n" " node %s\n" @@ -301,8 +301,8 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, } else { /* prev overwrites cur: */ - if (mustfix_fsck_err_on(bpos_cmp(expected_start, - cur->data->max_key) >= 0, c, + if (mustfix_fsck_err_on(bpos_ge(expected_start, + cur->data->max_key), c, "btree node overwritten by prev node at btree %s level %u:\n" " prev %s\n" " node %s", @@ 
-312,7 +312,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, goto out; } - if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c, + if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c, "btree node with incorrect min_key at btree %s level %u:\n" " prev %s\n" " node %s", @@ -336,7 +336,7 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b, bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key)); bch2_bpos_to_text(&buf2, b->key.k.p); - if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c, + if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c, "btree node with incorrect max_key at btree %s level %u:\n" " %s\n" " expected %s", @@ -374,8 +374,8 @@ again: bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { - BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); - BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + BUG_ON(bpos_lt(k.k->p, b->data->min_key)); + BUG_ON(bpos_gt(k.k->p, b->data->max_key)); bch2_btree_and_journal_iter_advance(&iter); bch2_bkey_buf_reassemble(&cur_k, c, k); @@ -912,8 +912,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b bkey_init(&prev.k->k); while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { - BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); - BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + BUG_ON(bpos_lt(k.k->p, b->data->min_key)); + BUG_ON(bpos_gt(k.k->p, b->data->max_key)); ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false, &k, true); @@ -1018,7 +1018,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans, six_lock_read(&b->c.lock, NULL, NULL); printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->min_key); - if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c, + if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c, "btree root with incorrect min_key: %s", buf.buf)) { bch_err(c, "repair 
unimplemented"); ret = -BCH_ERR_fsck_repair_unimplemented; @@ -1027,7 +1027,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans, printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->max_key); - if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c, + if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c, "btree root with incorrect max_key: %s", buf.buf)) { bch_err(c, "repair unimplemented"); ret = -BCH_ERR_fsck_repair_unimplemented; @@ -1341,7 +1341,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans, enum bch_data_type type; int ret; - if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0) + if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets))) return 1; bch2_alloc_to_v4(k, &old); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 8dbe930c1eb2..9dedac2c7885 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -77,7 +77,7 @@ static void verify_no_dups(struct btree *b, struct bkey l = bkey_unpack_key(b, p); struct bkey r = bkey_unpack_key(b, k); - BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0); + BUG_ON(bpos_ge(l.p, bkey_start_pos(&r))); } #endif } @@ -645,8 +645,8 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b) bch2_btree_build_aux_trees(b); for_each_btree_node_key_unpack(b, k, &iter, &unpacked) { - BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); - BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + BUG_ON(bpos_lt(k.k->p, b->data->min_key)); + BUG_ON(bpos_gt(k.k->p, b->data->max_key)); } } @@ -744,7 +744,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, b->data->max_key = b->key.k.p; } - btree_err_on(bpos_cmp(b->data->min_key, bp->min_key), + btree_err_on(!bpos_eq(b->data->min_key, bp->min_key), BTREE_ERR_MUST_RETRY, c, ca, b, NULL, "incorrect min_key: got %s should be %s", (printbuf_reset(&buf1), @@ -753,7 +753,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf)); } - 
btree_err_on(bpos_cmp(bn->max_key, b->key.k.p), + btree_err_on(!bpos_eq(bn->max_key, b->key.k.p), BTREE_ERR_MUST_RETRY, c, ca, b, i, "incorrect max key %s", (printbuf_reset(&buf1), diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h index 4b1810ad7d91..a720dd74139b 100644 --- a/fs/bcachefs/btree_io.h +++ b/fs/bcachefs/btree_io.h @@ -201,7 +201,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id, { if (version < bcachefs_metadata_version_inode_btree_change && btree_node_type_is_extents(btree_id) && - bpos_cmp(bn->min_key, POS_MIN) && + !bpos_eq(bn->min_key, POS_MIN) && write) bn->min_key = bpos_nosnap_predecessor(bn->min_key); @@ -218,7 +218,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id, if (version < bcachefs_metadata_version_inode_btree_change && btree_node_type_is_extents(btree_id) && - bpos_cmp(bn->min_key, POS_MIN) && + !bpos_eq(bn->min_key, POS_MIN) && !write) bn->min_key = bpos_nosnap_successor(bn->min_key); } diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 238ba10d34e4..8a18b55cab26 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -93,7 +93,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter) struct bpos pos = iter->pos; if ((iter->flags & BTREE_ITER_IS_EXTENTS) && - bkey_cmp(pos, POS_MAX)) + !bkey_eq(pos, POS_MAX)) pos = bkey_successor(iter, pos); return pos; } @@ -101,13 +101,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter) static inline bool btree_path_pos_before_node(struct btree_path *path, struct btree *b) { - return bpos_cmp(path->pos, b->data->min_key) < 0; + return bpos_lt(path->pos, b->data->min_key); } static inline bool btree_path_pos_after_node(struct btree_path *path, struct btree *b) { - return bpos_cmp(b->key.k.p, path->pos) < 0; + return bpos_gt(path->pos, b->key.k.p); } static inline bool btree_path_pos_in_node(struct btree_path *path, @@ -133,7 +133,7 @@ static void 
bch2_btree_path_verify_cached(struct btree_trans *trans, ck = (void *) path->l[0].b; BUG_ON(ck->key.btree_id != path->btree_id || - bkey_cmp(ck->key.pos, path->pos)); + !bkey_eq(ck->key.pos, path->pos)); if (!locked) btree_node_unlock(trans, path, 0); @@ -278,8 +278,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && iter->pos.snapshot != iter->snapshot); - BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 || - bkey_cmp(iter->pos, iter->k.p) > 0); + BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || + bkey_gt(iter->pos, iter->k.p)); } static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) @@ -313,7 +313,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k if (ret) goto out; - if (!bkey_cmp(prev.k->p, k.k->p) && + if (bkey_eq(prev.k->p, k.k->p) && bch2_snapshot_is_ancestor(trans->c, iter->snapshot, prev.k->p.snapshot) > 0) { struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; @@ -355,11 +355,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, continue; if (!key_cache) { - if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 && - bkey_cmp(pos, path->l[0].b->key.k.p) <= 0) + if (bkey_ge(pos, path->l[0].b->data->min_key) && + bkey_le(pos, path->l[0].b->key.k.p)) return; } else { - if (!bkey_cmp(pos, path->pos)) + if (bkey_eq(pos, path->pos)) return; } } @@ -1571,16 +1571,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey * _k = bch2_btree_node_iter_peek_all(&l->iter, l->b); k = _k ? 
bkey_disassemble(l->b, _k, u) : bkey_s_c_null; - EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0); + EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos)); - if (!k.k || bpos_cmp(path->pos, k.k->p)) + if (!k.k || !bpos_eq(path->pos, k.k->p)) goto hole; } else { struct bkey_cached *ck = (void *) path->l[0].b; EBUG_ON(ck && (path->btree_id != ck->key.btree_id || - bkey_cmp(path->pos, ck->key.pos))); + !bkey_eq(path->pos, ck->key.pos))); EBUG_ON(!ck || !ck->valid); *u = ck->k->k; @@ -1638,7 +1638,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) if (!b) goto out; - BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0); + BUG_ON(bpos_lt(b->key.k.p, iter->pos)); bkey_init(&iter->k); iter->k.p = iter->pos = b->key.k.p; @@ -1689,7 +1689,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) b = btree_path_node(path, path->level + 1); - if (!bpos_cmp(iter->pos, b->key.k.p)) { + if (bpos_eq(iter->pos, b->key.k.p)) { __btree_path_set_level_up(trans, path, path->level++); } else { /* @@ -1732,9 +1732,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter) { if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) { struct bpos pos = iter->k.p; - bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS - ? bpos_cmp(pos, SPOS_MAX) - : bkey_cmp(pos, SPOS_MAX)) != 0; + bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_eq(pos, SPOS_MAX) + : bkey_eq(pos, SPOS_MAX)); if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) pos = bkey_successor(iter, pos); @@ -1752,9 +1752,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter) inline bool bch2_btree_iter_rewind(struct btree_iter *iter) { struct bpos pos = bkey_start_pos(&iter->k); - bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS - ? bpos_cmp(pos, POS_MIN) - : bkey_cmp(pos, POS_MIN)) != 0; + bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? 
bpos_eq(pos, POS_MIN) + : bkey_eq(pos, POS_MIN)); if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) pos = bkey_predecessor(iter, pos); @@ -1773,11 +1773,11 @@ struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter) continue; if (i->btree_id > iter->btree_id) break; - if (bpos_cmp(i->k->k.p, iter->path->pos) < 0) + if (bpos_lt(i->k->k.p, iter->path->pos)) continue; if (i->key_cache_already_flushed) continue; - if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0) + if (!ret || bpos_lt(i->k->k.p, ret->k.p)) ret = i->k; } @@ -1797,7 +1797,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans, { struct bkey_i *k; - if (bpos_cmp(iter->path->pos, iter->journal_pos) < 0) + if (bpos_lt(iter->path->pos, iter->journal_pos)) iter->journal_idx = 0; k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, @@ -1936,8 +1936,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp next_update = btree_trans_peek_updates(iter); if (next_update && - bpos_cmp(next_update->k.p, - k.k ? k.k->p : l->b->key.k.p) <= 0) { + bpos_le(next_update->k.p, + k.k ? k.k->p : l->b->key.k.p)) { iter->k = next_update->k; k = bkey_i_to_s_c(next_update); } @@ -1950,7 +1950,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp * whiteout, with a real key at the same position, since * in the btree deleted keys sort before non deleted. */ - search_key = bpos_cmp(search_key, k.k->p) + search_key = !bpos_eq(search_key, k.k->p) ? 
k.k->p : bpos_successor(k.k->p); continue; @@ -1958,7 +1958,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp if (likely(k.k)) { break; - } else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) { + } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) { /* Advance to next leaf node: */ search_key = bpos_successor(l->b->key.k.p); } else { @@ -2008,19 +2008,19 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e */ if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) iter_pos = k.k->p; - else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) + else if (bkey_gt(bkey_start_pos(k.k), iter->pos)) iter_pos = bkey_start_pos(k.k); else iter_pos = iter->pos; - if (bkey_cmp(iter_pos, end) > 0) { + if (bkey_gt(iter_pos, end)) { bch2_btree_iter_set_pos(iter, end); k = bkey_s_c_null; goto out_no_locked; } if (iter->update_path && - bkey_cmp(iter->update_path->pos, k.k->p)) { + !bkey_eq(iter->update_path->pos, k.k->p)) { bch2_path_put_nokeep(trans, iter->update_path, iter->flags & BTREE_ITER_INTENT); iter->update_path = NULL; @@ -2143,7 +2143,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter) /* Check if we should go up to the parent node: */ if (!k.k || (iter->advanced && - !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) { + bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) { iter->pos = path_l(iter->path)->b->key.k.p; btree_path_set_level_up(trans, iter->path); iter->advanced = false; @@ -2159,7 +2159,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter) if (iter->path->level != iter->min_depth && (iter->advanced || !k.k || - bpos_cmp(iter->pos, k.k->p))) { + !bpos_eq(iter->pos, k.k->p))) { btree_path_set_level_down(trans, iter->path, iter->min_depth); iter->pos = bpos_successor(iter->pos); iter->advanced = false; @@ -2170,7 +2170,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter) if (iter->path->level == iter->min_depth && 
iter->advanced && k.k && - !bpos_cmp(iter->pos, k.k->p)) { + bpos_eq(iter->pos, k.k->p)) { iter->pos = bpos_successor(iter->pos); iter->advanced = false; continue; @@ -2178,7 +2178,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter) if (iter->advanced && iter->path->level == iter->min_depth && - bpos_cmp(k.k->p, iter->pos)) + !bpos_eq(k.k->p, iter->pos)) iter->advanced = false; BUG_ON(iter->advanced); @@ -2248,8 +2248,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) &iter->path->l[0], &iter->k); if (!k.k || ((iter->flags & BTREE_ITER_IS_EXTENTS) - ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0 - : bpos_cmp(k.k->p, search_key) > 0)) + ? bpos_ge(bkey_start_pos(k.k), search_key) + : bpos_gt(k.k->p, search_key))) k = btree_path_level_prev(trans, iter->path, &iter->path->l[0], &iter->k); @@ -2263,7 +2263,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) * longer at the same _key_ (not pos), return * that candidate */ - if (saved_path && bkey_cmp(k.k->p, saved_k.p)) { + if (saved_path && !bkey_eq(k.k->p, saved_k.p)) { bch2_path_put_nokeep(trans, iter->path, iter->flags & BTREE_ITER_INTENT); iter->path = saved_path; @@ -2298,7 +2298,7 @@ got_key: } break; - } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) { + } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) { /* Advance to previous leaf node: */ search_key = bpos_predecessor(iter->path->l[0].b->data->min_key); } else { @@ -2309,10 +2309,10 @@ got_key: } } - EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0); + EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos)); /* Extents can straddle iter->pos: */ - if (bkey_cmp(k.k->p, iter->pos) < 0) + if (bkey_lt(k.k->p, iter->pos)) iter->pos = k.k->p; if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) @@ -2377,7 +2377,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) struct bkey_i *next_update; if ((next_update = btree_trans_peek_updates(iter)) 
&& - !bpos_cmp(next_update->k.p, iter->pos)) { + bpos_eq(next_update->k.p, iter->pos)) { iter->k |