Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r-- | fs/bcachefs/btree_iter.c | 3215
1 file changed, 3215 insertions, 0 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
new file mode 100644
index 000000000000..1d79514754d7
--- /dev/null
+++ b/fs/bcachefs/btree_iter.c
@@ -0,0 +1,3215 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "bkey_buf.h"
+#include "btree_cache.h"
+#include "btree_iter.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "debug.h"
+#include "error.h"
+#include "extents.h"
+#include "journal.h"
+#include "replicas.h"
+#include "snapshot.h"
+#include "trace.h"
+
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
+static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
+				       struct btree_path *);
+
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef TRACK_PATH_ALLOCATED
+	return iter->ip_allocated;
+#else
+	return 0;
+#endif
+}
+
+static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
+
+static inline int __btree_path_cmp(const struct btree_path *l,
+				   enum btree_id r_btree_id,
+				   bool r_cached,
+				   struct bpos r_pos,
+				   unsigned r_level)
+{
+	/*
+	 * Must match lock ordering as defined by __bch2_btree_node_lock:
+	 */
+	return cmp_int(l->btree_id, r_btree_id) ?:
+	       cmp_int((int) l->cached, (int) r_cached) ?:
+	       bpos_cmp(l->pos, r_pos) ?:
+	       -cmp_int(l->level, r_level);
+}
+
+static inline int btree_path_cmp(const struct btree_path *l,
+				 const struct btree_path *r)
+{
+	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
+}
+
+static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
+{
+	/* Are we iterating over keys in all snapshots? */
+	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+		p = bpos_successor(p);
+	} else {
+		p = bpos_nosnap_successor(p);
+		p.snapshot = iter->snapshot;
+	}
+
+	return p;
+}
+
+static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
+{
+	/* Are we iterating over keys in all snapshots?
*/ + if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) { + p = bpos_predecessor(p); + } else { + p = bpos_nosnap_predecessor(p); + p.snapshot = iter->snapshot; + } + + return p; +} + +static inline struct bpos btree_iter_search_key(struct btree_iter *iter) +{ + struct bpos pos = iter->pos; + + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + !bkey_eq(pos, POS_MAX)) + pos = bkey_successor(iter, pos); + return pos; +} + +static inline bool btree_path_pos_before_node(struct btree_path *path, + struct btree *b) +{ + return bpos_lt(path->pos, b->data->min_key); +} + +static inline bool btree_path_pos_after_node(struct btree_path *path, + struct btree *b) +{ + return bpos_gt(path->pos, b->key.k.p); +} + +static inline bool btree_path_pos_in_node(struct btree_path *path, + struct btree *b) +{ + return path->btree_id == b->c.btree_id && + !btree_path_pos_before_node(path, b) && + !btree_path_pos_after_node(path, b); +} + +/* Btree iterator: */ + +#ifdef CONFIG_BCACHEFS_DEBUG + +static void bch2_btree_path_verify_cached(struct btree_trans *trans, + struct btree_path *path) +{ + struct bkey_cached *ck; + bool locked = btree_node_locked(path, 0); + + if (!bch2_btree_node_relock(trans, path, 0)) + return; + + ck = (void *) path->l[0].b; + BUG_ON(ck->key.btree_id != path->btree_id || + !bkey_eq(ck->key.pos, path->pos)); + + if (!locked) + btree_node_unlock(trans, path, 0); +} + +static void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned level) +{ + struct btree_path_level *l; + struct btree_node_iter tmp; + bool locked; + struct bkey_packed *p, *k; + struct printbuf buf1 = PRINTBUF; + struct printbuf buf2 = PRINTBUF; + struct printbuf buf3 = PRINTBUF; + const char *msg; + + if (!bch2_debug_check_iterators) + return; + + l = &path->l[level]; + tmp = l->iter; + locked = btree_node_locked(path, level); + + if (path->cached) { + if (!level) + bch2_btree_path_verify_cached(trans, path); + return; + } + + if (!btree_path_node(path, level)) + return; + + if (!bch2_btree_node_relock_notrace(trans, path, level)) + return; + + BUG_ON(!btree_path_pos_in_node(path, l->b)); + + bch2_btree_node_iter_verify(&l->iter, l->b); + + /* + * For interior nodes, the iterator will have skipped past deleted keys: + */ + p = level + ? bch2_btree_node_iter_prev(&tmp, l->b) + : bch2_btree_node_iter_prev_all(&tmp, l->b); + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + + if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) { + msg = "before"; + goto err; + } + + if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { + msg = "after"; + goto err; + } + + if (!locked) + btree_node_unlock(trans, path, level); + return; +err: + bch2_bpos_to_text(&buf1, path->pos); + + if (p) { + struct bkey uk = bkey_unpack_key(l->b, p); + + bch2_bkey_to_text(&buf2, &uk); + } else { + prt_printf(&buf2, "(none)"); + } + + if (k) { + struct bkey uk = bkey_unpack_key(l->b, k); + + bch2_bkey_to_text(&buf3, &uk); + } else { + prt_printf(&buf3, "(none)"); + } + + panic("path should be %s key at level %u:\n" + "path pos %s\n" + "prev key %s\n" + "cur key %s\n", + msg, level, buf1.buf, buf2.buf, buf3.buf); +} + +static void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) +{ + struct bch_fs *c = trans->c; + unsigned i; + + EBUG_ON(path->btree_id >= BTREE_ID_NR); + + for (i = 0; i < (!path->cached ? 
BTREE_MAX_DEPTH : 1); i++) { + if (!path->l[i].b) { + BUG_ON(!path->cached && + bch2_btree_id_root(c, path->btree_id)->b->c.level > i); + break; + } + + bch2_btree_path_verify_level(trans, path, i); + } + + bch2_btree_path_verify_locks(path); +} + +void bch2_trans_verify_paths(struct btree_trans *trans) +{ + struct btree_path *path; + + trans_for_each_path(trans, path) + bch2_btree_path_verify(trans, path); +} + +static void bch2_btree_iter_verify(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + + BUG_ON(iter->btree_id >= BTREE_ID_NR); + + BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached); + + BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); + + BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + !btree_type_has_snapshots(iter->btree_id)); + + if (iter->update_path) + bch2_btree_path_verify(trans, iter->update_path); + bch2_btree_path_verify(trans, iter->path); +} + +static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) +{ + BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + !iter->pos.snapshot); + + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); + + BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || + bkey_gt(iter->pos, iter->k.p)); +} + +static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) +{ + struct btree_trans *trans = iter->trans; + struct btree_iter copy; + struct bkey_s_c prev; + int ret = 0; + + if (!bch2_debug_check_iterators) + return 0; + + if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) + return 0; + + if (bkey_err(k) || !k.k) + return 0; + + BUG_ON(!bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)); + + bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos, + BTREE_ITER_NOPRESERVE| + BTREE_ITER_ALL_SNAPSHOTS); + prev = bch2_btree_iter_prev(©); + if (!prev.k) + goto out; + + ret = bkey_err(prev); + if (ret) + goto out; + + if (bkey_eq(prev.k->p, k.k->p) && + bch2_snapshot_is_ancestor(trans->c, iter->snapshot, + prev.k->p.snapshot) > 0) { + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; + + bch2_bkey_to_text(&buf1, k.k); + bch2_bkey_to_text(&buf2, prev.k); + + panic("iter snap %u\n" + "k %s\n" + "prev %s\n", + iter->snapshot, + buf1.buf, buf2.buf); + } +out: + bch2_trans_iter_exit(trans, ©); + return ret; +} + +void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, + struct bpos pos, bool key_cache) +{ + struct btree_path *path; + unsigned idx; + struct printbuf buf = PRINTBUF; + + btree_trans_sort_paths(trans); + + trans_for_each_path_inorder(trans, path, idx) { + int cmp = cmp_int(path->btree_id, id) ?: + cmp_int(path->cached, key_cache); + + if (cmp > 0) + break; + if (cmp < 0) + continue; + + if (!btree_node_locked(path, 0) || + !path->should_be_locked) + continue; + + if (!key_cache) { + if (bkey_ge(pos, path->l[0].b->data->min_key) && + bkey_le(pos, path->l[0].b->key.k.p)) + return; + } else { + if (bkey_eq(pos, path->pos)) + return; + } + } + + bch2_dump_trans_paths_updates(trans); + bch2_bpos_to_text(&buf, pos); + + panic("not locked: %s %s%s\n", + bch2_btree_ids[id], buf.buf, + key_cache ? 
" cached" : ""); +} + +#else + +static inline void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned l) {} +static inline void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) {} +static inline void bch2_btree_iter_verify(struct btree_iter *iter) {} +static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {} +static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; } + +#endif + +/* Btree path: fixups after btree updates */ + +static void btree_node_iter_set_set_pos(struct btree_node_iter *iter, + struct btree *b, + struct bset_tree *t, + struct bkey_packed *k) +{ + struct btree_node_iter_set *set; + + btree_node_iter_for_each(iter, set) + if (set->end == t->end_offset) { + set->k = __btree_node_key_to_offset(b, k); + bch2_btree_node_iter_sort(iter, b); + return; + } + + bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t)); +} + +static void __bch2_btree_path_fix_key_modified(struct btree_path *path, + struct btree *b, + struct bkey_packed *where) +{ + struct btree_path_level *l = &path->l[b->c.level]; + + if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b)) + return; + + if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0) + bch2_btree_node_iter_advance(&l->iter, l->b); +} + +void bch2_btree_path_fix_key_modified(struct btree_trans *trans, + struct btree *b, + struct bkey_packed *where) +{ + struct btree_path *path; + + trans_for_each_path_with_node(trans, b, path) { + __bch2_btree_path_fix_key_modified(path, b, where); + bch2_btree_path_verify_level(trans, path, b->c.level); + } +} + +static void __bch2_btree_node_iter_fix(struct btree_path *path, + struct btree *b, + struct btree_node_iter *node_iter, + struct bset_tree *t, + struct bkey_packed *where, + unsigned clobber_u64s, + unsigned new_u64s) +{ + const struct bkey_packed *end = btree_bkey_last(b, t); + struct btree_node_iter_set *set; + unsigned offset = __btree_node_key_to_offset(b, where); + int shift = new_u64s - clobber_u64s; + unsigned old_end = t->end_offset - shift; + unsigned orig_iter_pos = node_iter->data[0].k; + bool iter_current_key_modified = + orig_iter_pos >= offset && + orig_iter_pos <= offset + clobber_u64s; + + btree_node_iter_for_each(node_iter, set) + if (set->end == old_end) + goto found; + + /* didn't find the bset in the iterator - might have to readd it: */ + if (new_u64s && + bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { + bch2_btree_node_iter_push(node_iter, b, where, end); + goto fixup_done; + } else { + /* Iterator is after key that changed */ + return; + } +found: + set->end = t->end_offset; + + /* Iterator hasn't gotten to the key that changed yet: */ + if (set->k < offset) + return; + + if (new_u64s && + bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { + set->k = offset; + } else if (set->k < offset + clobber_u64s) { + set->k = offset + new_u64s; + if (set->k == set->end) + bch2_btree_node_iter_set_drop(node_iter, set); + } else { + /* Iterator is after key that changed */ + set->k = (int) set->k + shift; + return; + } + + bch2_btree_node_iter_sort(node_iter, b); +fixup_done: + if (node_iter->data[0].k != orig_iter_pos) + iter_current_key_modified = true; + + /* + * When a new key is added, and the node iterator now points to that + * key, the iterator might have skipped past deleted keys that should + * come after the key the iterator now points to. 
We have to rewind to + * before those deleted keys - otherwise + * bch2_btree_node_iter_prev_all() breaks: + */ + if (!bch2_btree_node_iter_end(node_iter) && + iter_current_key_modified && + b->c.level) { + struct bkey_packed *k, *k2, *p; + + k = bch2_btree_node_iter_peek_all(node_iter, b); + + for_each_bset(b, t) { + bool set_pos = false; + + if (node_iter->data[0].end == t->end_offset) + continue; + + k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t); + + while ((p = bch2_bkey_prev_all(b, t, k2)) && + bkey_iter_cmp(b, k, p) < 0) { + k2 = p; + set_pos = true; + } + + if (set_pos) + btree_node_iter_set_set_pos(node_iter, + b, t, k2); + } + } +} + +void bch2_btree_node_iter_fix(struct btree_trans *trans, + struct btree_path *path, + struct btree *b, + struct btree_node_iter *node_iter, + struct bkey_packed *where, + unsigned clobber_u64s, + unsigned new_u64s) +{ + struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where); + struct btree_path *linked; + + if (node_iter != &path->l[b->c.level].iter) { + __bch2_btree_node_iter_fix(path, b, node_iter, t, + where, clobber_u64s, new_u64s); + + if (bch2_debug_check_iterators) + bch2_btree_node_iter_verify(node_iter, b); + } + + trans_for_each_path_with_node(trans, b, linked) { + __bch2_btree_node_iter_fix(linked, b, + &linked->l[b->c.level].iter, t, + where, clobber_u64s, new_u64s); + bch2_btree_path_verify_level(trans, linked, b->c.level); + } +} + +/* Btree path level: pointer to a particular btree node and node iter */ + +static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c, + struct btree_path_level *l, + struct bkey *u, + struct bkey_packed *k) +{ + if (unlikely(!k)) { + /* + * signal to bch2_btree_iter_peek_slot() that we're currently at + * a hole + */ + u->type = KEY_TYPE_deleted; + return bkey_s_c_null; + } + + return bkey_disassemble(l->b, k, u); +} + +static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c, + struct btree_path_level *l, + struct bkey *u) +{ + return __btree_iter_unpack(c, l, u, + bch2_btree_node_iter_peek_all(&l->iter, l->b)); +} + +static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) +{ + struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u, + bch2_btree_node_iter_peek(&l->iter, l->b)); + + path->pos = k.k ? k.k->p : l->b->key.k.p; + trans->paths_sorted = false; + bch2_btree_path_verify_level(trans, path, l - path->l); + return k; +} + +static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) +{ + struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u, + bch2_btree_node_iter_prev(&l->iter, l->b)); + + path->pos = k.k ? 
k.k->p : l->b->data->min_key; + trans->paths_sorted = false; + bch2_btree_path_verify_level(trans, path, l - path->l); + return k; +} + +static inline bool btree_path_advance_to_pos(struct btree_path *path, + struct btree_path_level *l, + int max_advance) +{ + struct bkey_packed *k; + int nr_advanced = 0; + + while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) && + bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { + if (max_advance > 0 && nr_advanced >= max_advance) + return false; + + bch2_btree_node_iter_advance(&l->iter, l->b); + nr_advanced++; + } + + return true; +} + +static inline void __btree_path_level_init(struct btree_path *path, + unsigned level) +{ + struct btree_path_level *l = &path->l[level]; + + bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); + + /* + * Iterators to interior nodes should always be pointed at the first non + * whiteout: + */ + if (level) + bch2_btree_node_iter_peek(&l->iter, l->b); +} + +void bch2_btree_path_level_init(struct btree_trans *trans, + struct btree_path *path, + struct btree *b) +{ + BUG_ON(path->cached); + + EBUG_ON(!btree_path_pos_in_node(path, b)); + + path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); + path->l[b->c.level].b = b; + __btree_path_level_init(path, b->c.level); +} + +/* Btree path: fixups after btree node updates: */ + +static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b) +{ + struct bch_fs *c = trans->c; + struct btree_insert_entry *i; + + trans_for_each_update(trans, i) + if (!i->cached && + i->level == b->c.level && + i->btree_id == b->c.btree_id && + bpos_cmp(i->k->k.p, b->data->min_key) >= 0 && + bpos_cmp(i->k->k.p, b->data->max_key) <= 0) { + i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v; + + if (unlikely(trans->journal_replay_not_finished)) { + struct bkey_i *j_k = + bch2_journal_keys_peek_slot(c, i->btree_id, i->level, + i->k->k.p); + + if (j_k) { + i->old_k = j_k->k; + i->old_v = &j_k->v; + } + } + } +} + +/* + * A btree node is being replaced - update the iterator to point to the new + * node: + */ +void bch2_trans_node_add(struct btree_trans *trans, struct btree *b) +{ + struct btree_path *path; + + trans_for_each_path(trans, path) + if (path->uptodate == BTREE_ITER_UPTODATE && + !path->cached && + btree_path_pos_in_node(path, b)) { + enum btree_node_locked_type t = + btree_lock_want(path, b->c.level); + + if (t != BTREE_NODE_UNLOCKED) { + btree_node_unlock(trans, path, b->c.level); + six_lock_increment(&b->c.lock, (enum six_lock_type) t); + mark_btree_node_locked(trans, path, b->c.level, t); + } + + bch2_btree_path_level_init(trans, path, b); + } + + bch2_trans_revalidate_updates_in_node(trans, b); +} + +/* + * A btree node has been modified in such a way as to invalidate iterators - fix + * them: + */ +void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b) +{ + struct btree_path *path; + + trans_for_each_path_with_node(trans, b, path) + __btree_path_level_init(path, b->c.level); + + bch2_trans_revalidate_updates_in_node(trans, b); +} + +/* Btree path: traverse, set_pos: */ + +static inline int btree_path_lock_root(struct btree_trans *trans, + struct btree_path *path, + unsigned depth_want, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; + struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b; + enum six_lock_type lock_type; + unsigned i; + int ret; + + EBUG_ON(path->nodes_locked); + + while (1) { + b = READ_ONCE(*rootp); + path->level = READ_ONCE(b->c.level); + + if (unlikely(path->level < 
depth_want)) { + /* + * the root is at a lower depth than the depth we want: + * got to the end of the btree, or we're walking nodes + * greater than some depth and there are no nodes >= + * that depth + */ + path->level = depth_want; + for (i = path->level; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; + return 1; + } + + lock_type = __btree_lock_want(path, path->level); + ret = btree_node_lock(trans, path, &b->c, + path->level, lock_type, trace_ip); + if (unlikely(ret)) { + if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed)) + continue; + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ret; + BUG(); + } + + if (likely(b == READ_ONCE(*rootp) && + b->c.level == path->level && + !race_fault())) { + for (i = 0; i < path->level; i++) + path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root); + path->l[path->level].b = b; + for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; + + mark_btree_node_locked(trans, path, path->level, + (enum btree_node_locked_type) lock_type); + bch2_btree_path_level_init(trans, path, b); + return 0; + } + + six_unlock_type(&b->c.lock, lock_type); + } +} + +noinline +static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree_node_iter node_iter = l->iter; + struct bkey_packed *k; + struct bkey_buf tmp; + unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) + ? (path->level > 1 ? 0 : 2) + : (path->level > 1 ? 1 : 16); + bool was_locked = btree_node_locked(path, path->level); + int ret = 0; + + bch2_bkey_buf_init(&tmp); + + while (nr-- && !ret) { + if (!bch2_btree_node_relock(trans, path, path->level)) + break; + + bch2_btree_node_iter_advance(&node_iter, l->b); + k = bch2_btree_node_iter_peek(&node_iter, l->b); + if (!k) + break; + + bch2_bkey_buf_unpack(&tmp, c, l->b, k); + ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, + path->level - 1); + } + + if (!was_locked) + btree_node_unlock(trans, path, path->level); + + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + +static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path, + struct btree_and_journal_iter *jiter) +{ + struct bch_fs *c = trans->c; + struct bkey_s_c k; + struct bkey_buf tmp; + unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) + ? (path->level > 1 ? 0 : 2) + : (path->level > 1 ? 
1 : 16); + bool was_locked = btree_node_locked(path, path->level); + int ret = 0; + + bch2_bkey_buf_init(&tmp); + + while (nr-- && !ret) { + if (!bch2_btree_node_relock(trans, path, path->level)) + break; + + bch2_btree_and_journal_iter_advance(jiter); + k = bch2_btree_and_journal_iter_peek(jiter); + if (!k.k) + break; + + bch2_bkey_buf_reassemble(&tmp, c, k); + ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, + path->level - 1); + } + + if (!was_locked) + btree_node_unlock(trans, path, path->level); + + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + +static noinline void btree_node_mem_ptr_set(struct btree_trans *trans, + struct btree_path *path, + unsigned plevel, struct btree *b) +{ + struct btree_path_level *l = &path->l[plevel]; + bool locked = btree_node_locked(path, plevel); + struct bkey_packed *k; + struct bch_btree_ptr_v2 *bp; + + if (!bch2_btree_node_relock(trans, path, plevel)) + return; + + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + BUG_ON(k->type != KEY_TYPE_btree_ptr_v2); + + bp = (void *) bkeyp_val(&l->b->format, k); + bp->mem_ptr = (unsigned long)b; + + if (!locked) + btree_node_unlock(trans, path, plevel); +} + +static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + struct bkey_buf *out) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree_and_journal_iter jiter; + struct bkey_s_c k; + int ret = 0; + + __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos); + + k = bch2_btree_and_journal_iter_peek(&jiter); + + bch2_bkey_buf_reassemble(out, c, k); + + if (flags & BTREE_ITER_PREFETCH) + ret = btree_path_prefetch_j(trans, path, &jiter); + + bch2_btree_and_journal_iter_exit(&jiter); + return ret; +} + +static __always_inline int btree_path_down(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree *b; + unsigned level = path->level - 1; + enum six_lock_type lock_type = __btree_lock_want(path, level); + struct bkey_buf tmp; + int ret; + + EBUG_ON(!btree_node_locked(path, path->level)); + + bch2_bkey_buf_init(&tmp); + + if (unlikely(trans->journal_replay_not_finished)) { + ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp); + if (ret) + goto err; + } else { + bch2_bkey_buf_unpack(&tmp, c, l->b, + bch2_btree_node_iter_peek(&l->iter, l->b)); + + if (flags & BTREE_ITER_PREFETCH) { + ret = btree_path_prefetch(trans, path); + if (ret) + goto err; + } + } + + b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip); + ret = PTR_ERR_OR_ZERO(b); + if (unlikely(ret)) + goto err; + + if (likely(!trans->journal_replay_not_finished && + tmp.k->k.type == KEY_TYPE_btree_ptr_v2) && + unlikely(b != btree_node_mem_ptr(tmp.k))) + btree_node_mem_ptr_set(trans, path, level + 1, b); + + if (btree_node_read_locked(path, level + 1)) + btree_node_unlock(trans, path, level + 1); + + mark_btree_node_locked(trans, path, level, + (enum btree_node_locked_type) lock_type); + path->level = level; + bch2_btree_path_level_init(trans, path, b); + + bch2_btree_path_verify_locks(path); +err: + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + + +static int bch2_btree_path_traverse_all(struct btree_trans *trans) +{ + struct bch_fs *c = trans->c; + struct btree_path *path; + unsigned long trace_ip = _RET_IP_; + int i, ret = 0; + + if (trans->in_traverse_all) + return 
-BCH_ERR_transaction_restart_in_traverse_all; + + trans->in_traverse_all = true; +retry_all: + trans->restarted = 0; + trans->last_restarted_ip = 0; + + trans_for_each_path(trans, path) + path->should_be_locked = false; + + btree_trans_sort_paths(trans); + + bch2_trans_unlock(trans); + cond_resched(); + + if (unlikely(trans->memory_allocation_failure)) { + struct closure cl; + + closure_init_stack(&cl); + + do { + ret = bch2_btree_cache_cannibalize_lock(c, &cl); + closure_sync(&cl); + } while (ret); + } + + /* Now, redo traversals in correct order: */ + i = 0; + while (i < trans->nr_sorted) { + path = trans->paths + trans->sorted[i]; + + /* + * Traversing a path can cause another path to be added at about + * the same position: + */ + if (path->uptodate) { + __btree_path_get(path, false); + ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_); + __btree_path_put(path, false); + + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || + bch2_err_matches(ret, ENOMEM)) + goto retry_all; + if (ret) + goto err; + } else { + i++; + } + } + + /* + * We used to assert that all paths had been traversed here + * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since + * path->should_be_locked is not set yet, we might have unlocked and + * then failed to relock a path - that's fine. + */ +err: + bch2_btree_cache_cannibalize_unlock(c); + + trans->in_traverse_all = false; + + trace_and_count(c, trans_traverse_all, trans, trace_ip); + return ret; +} + +static inline bool btree_path_check_pos_in_node(struct btree_path *path, + unsigned l, int check_pos) +{ + if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b)) + return false; + if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b)) + return false; + return true; +} + +static inline bool btree_path_good_node(struct btree_trans *trans, + struct btree_path *path, + unsigned l, int check_pos) +{ + return is_btree_node(path, l) && + bch2_btree_node_relock(trans, path, l) && + btree_path_check_pos_in_node(path, l, check_pos); +} + +static void btree_path_set_level_down(struct btree_trans *trans, + struct btree_path *path, + unsigned new_level) +{ + unsigned l; + + path->level = new_level; + + for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++) + if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED) + btree_node_unlock(trans, path, l); + + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + bch2_btree_path_verify(trans, path); +} + +static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans, + struct btree_path *path, + int check_pos) +{ + unsigned i, l = path->level; +again: + while (btree_path_node(path, l) && + !btree_path_good_node(trans, path, l, check_pos)) + __btree_path_set_level_up(trans, path, l++); + + /* If we need intent locks, take them too: */ + for (i = l + 1; + i < path->locks_want && btree_path_node(path, i); + i++) + if (!bch2_btree_node_relock(trans, path, i)) { + while (l <= i) + __btree_path_set_level_up(trans, path, l++); + goto again; + } + + return l; +} + +static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans, + struct btree_path *path, + int check_pos) +{ + return likely(btree_node_locked(path, path->level) && + btree_path_check_pos_in_node(path, path->level, check_pos)) + ? path->level + : __btree_path_up_until_good_node(trans, path, check_pos); +} + +/* + * This is the main state machine for walking down the btree - walks down to a + * specified depth + * + * Returns 0 on success, -EIO on error (error reading in a btree node). 
+ * + * On error, caller (peek_node()/peek_key()) must return NULL; the error is + * stashed in the iterator and returned from bch2_trans_exit(). + */ +int bch2_btree_path_traverse_one(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) +{ + unsigned depth_want = path->level; + int ret = -((int) trans->restarted); + + if (unlikely(ret)) + goto out; + + /* + * Ensure we obey path->should_be_locked: if it's set, we can't unlock + * and re-traverse the path without a transaction restart: + */ + if (path->should_be_locked) { + ret = bch2_btree_path_relock(trans, path, trace_ip); + goto out; + } + + if (path->cached) { + ret = bch2_btree_path_traverse_cached(trans, path, flags); + goto out; + } + + if (unlikely(path->level >= BTREE_MAX_DEPTH)) + goto out; + + path->level = btree_path_up_until_good_node(trans, path, 0); + + EBUG_ON(btree_path_node(path, path->level) && + !btree_node_locked(path, path->level)); + + /* + * Note: path->nodes[path->level] may be temporarily NULL here - that + * would indicate to other code that we got to the end of the btree, + * here it indicates that relocking the root failed - it's critical that + * btree_path_lock_root() comes next and that it can't fail + */ + while (path->level > depth_want) { + ret = btree_path_node(path, path->level) + ? btree_path_down(trans, path, flags, trace_ip) + : btree_path_lock_root(trans, path, depth_want, trace_ip); + if (unlikely(ret)) { + if (ret == 1) { + /* + * No nodes at this level - got to the end of + * the btree: + */ + ret = 0; + goto out; + } + + __bch2_btree_path_unlock(trans, path); + path->level = depth_want; + path->l[path->level].b = ERR_PTR(ret); + goto out; + } + } + + path->uptodate = BTREE_ITER_UPTODATE; +out: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted) + panic("ret %s (%i) trans->restarted %s (%i)\n", + bch2_err_str(ret), ret, + bch2_err_str |