Diffstat (limited to 'fs/bcachefs/btree_io.c')
-rw-r--r-- | fs/bcachefs/btree_io.c | 2298
1 files changed, 2298 insertions, 0 deletions
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
new file mode 100644
index 000000000000..37d896edb06e
--- /dev/null
+++ b/fs/bcachefs/btree_io.c
@@ -0,0 +1,2298 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "bkey_sort.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "debug.h"
+#include "error.h"
+#include "extents.h"
+#include "io_write.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
+#include "recovery.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/sched/mm.h>
+
+void bch2_btree_node_io_unlock(struct btree *b)
+{
+	EBUG_ON(!btree_node_write_in_flight(b));
+
+	clear_btree_node_write_in_flight_inner(b);
+	clear_btree_node_write_in_flight(b);
+	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+}
+
+void bch2_btree_node_io_lock(struct btree *b)
+{
+	bch2_assert_btree_nodes_not_locked();
+
+	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
+			    TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_read(struct btree *b)
+{
+	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+		       TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_write(struct btree *b)
+{
+	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+		       TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_read(struct btree *b)
+{
+	bch2_assert_btree_nodes_not_locked();
+
+	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+		       TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_write(struct btree *b)
+{
+	bch2_assert_btree_nodes_not_locked();
+
+	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+		       TASK_UNINTERRUPTIBLE);
+}
+
+static void verify_no_dups(struct btree *b,
+			   struct bkey_packed *start,
+			   struct bkey_packed *end)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+	struct bkey_packed *k, *p;
+
+	if (start == end)
+		return;
+
+	for (p = start, k = bkey_p_next(start);
+	     k != end;
+	     p = k, k = bkey_p_next(k)) {
+		struct bkey l = bkey_unpack_key(b, p);
+		struct bkey r = bkey_unpack_key(b, k);
+
+		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
+	}
+#endif
+}
+
+static void set_needs_whiteout(struct bset *i, int v)
+{
+	struct bkey_packed *k;
+
+	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+		k->needs_whiteout = v;
+}
+
+static void btree_bounce_free(struct bch_fs *c, size_t size,
+			      bool used_mempool, void *p)
+{
+	if (used_mempool)
+		mempool_free(p, &c->btree_bounce_pool);
+	else
+		vpfree(p, size);
+}
+
+static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
+				bool *used_mempool)
+{
+	unsigned flags = memalloc_nofs_save();
+	void *p;
+
+	BUG_ON(size > btree_bytes(c));
+
+	*used_mempool = false;
+	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	if (!p) {
+		*used_mempool = true;
+		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+	}
+	memalloc_nofs_restore(flags);
+	return p;
+}
+
+static void sort_bkey_ptrs(const struct btree *bt,
+			   struct bkey_packed **ptrs, unsigned nr)
+{
+	unsigned n = nr, a = nr / 2, b, c, d;
+
+	if (!a)
+		return;
+
+	/* Heap sort: see lib/sort.c: */
+	while (1) {
+		if (a)
+			a--;
+		else if (--n)
+			swap(ptrs[0], ptrs[n]);
+		else
+			break;
+
+		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
+			b = bch2_bkey_cmp_packed(bt,
+					ptrs[c],
+					ptrs[d]) >= 0 ? c : d;
+		if (d == n)
+			b = c;
+
+		while (b != a &&
+		       bch2_bkey_cmp_packed(bt,
+				       ptrs[a],
+				       ptrs[b]) >= 0)
+			b = (b - 1) / 2;
+		c = b;
+		while (b != a) {
+			b = (b - 1) / 2;
+			swap(ptrs[b], ptrs[c]);
+		}
+	}
+}
+
+static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
+{
+	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
+	bool used_mempool = false;
+	size_t bytes = b->whiteout_u64s * sizeof(u64);
+
+	if (!b->whiteout_u64s)
+		return;
+
+	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
+
+	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
+
+	for (k = unwritten_whiteouts_start(c, b);
+	     k != unwritten_whiteouts_end(c, b);
+	     k = bkey_p_next(k))
+		*--ptrs = k;
+
+	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
+
+	k = new_whiteouts;
+
+	while (ptrs != ptrs_end) {
+		bkey_p_copy(k, *ptrs);
+		k = bkey_p_next(k);
+		ptrs++;
+	}
+
+	verify_no_dups(b, new_whiteouts,
+		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
+
+	memcpy_u64s(unwritten_whiteouts_start(c, b),
+		    new_whiteouts, b->whiteout_u64s);
+
+	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
+}
+
+static bool should_compact_bset(struct btree *b, struct bset_tree *t,
+				bool compacting, enum compact_mode mode)
+{
+	if (!bset_dead_u64s(b, t))
+		return false;
+
+	switch (mode) {
+	case COMPACT_LAZY:
+		return should_compact_bset_lazy(b, t) ||
+			(compacting && !bset_written(b, bset(b, t)));
+	case COMPACT_ALL:
+		return true;
+	default:
+		BUG();
+	}
+}
+
+static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
+{
+	struct bset_tree *t;
+	bool ret = false;
+
+	for_each_bset(b, t) {
+		struct bset *i = bset(b, t);
+		struct bkey_packed *k, *n, *out, *start, *end;
+		struct btree_node_entry *src = NULL, *dst = NULL;
+
+		if (t != b->set && !bset_written(b, i)) {
+			src = container_of(i, struct btree_node_entry, keys);
+			dst = max(write_block(b),
+				  (void *) btree_bkey_last(b, t - 1));
+		}
+
+		if (src != dst)
+			ret = true;
+
+		if (!should_compact_bset(b, t, ret, mode)) {
+			if (src != dst) {
+				memmove(dst, src, sizeof(*src) +
+					le16_to_cpu(src->keys.u64s) *
+					sizeof(u64));
+				i = &dst->keys;
+				set_btree_bset(b, t, i);
+			}
+			continue;
+		}
+
+		start = btree_bkey_first(b, t);
+		end = btree_bkey_last(b, t);
+
+		if (src != dst) {
+			memmove(dst, src, sizeof(*src));
+			i = &dst->keys;
+			set_btree_bset(b, t, i);
+		}
+
+		out = i->start;
+
+		for (k = start; k != end; k = n) {
+			n = bkey_p_next(k);
+
+			if (!bkey_deleted(k)) {
+				bkey_p_copy(out, k);
+				out = bkey_p_next(out);
+			} else {
+				BUG_ON(k->needs_whiteout);
+			}
+		}
+
+		i->u64s = cpu_to_le16((u64 *) out - i->_data);
+		set_btree_bset_end(b, t);
+		bch2_bset_set_no_aux_tree(b, t);
+		ret = true;
+	}
+
+	bch2_verify_btree_nr_keys(b);
+
+	bch2_btree_build_aux_trees(b);
+
+	return ret;
+}
+
+bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
+			    enum compact_mode mode)
+{
+	return bch2_drop_whiteouts(b, mode);
+}
+
+static void btree_node_sort(struct bch_fs *c, struct btree *b,
+			    unsigned start_idx,
+			    unsigned end_idx,
+			    bool filter_whiteouts)
+{
+	struct btree_node *out;
+	struct sort_iter_stack sort_iter;
+	struct bset_tree *t;
+	struct bset *start_bset = bset(b, &b->set[start_idx]);
+	bool used_mempool = false;
+	u64 start_time, seq = 0;
+	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
+	bool sorting_entire_node = start_idx == 0 &&
+		end_idx == b->nsets;
+
+	sort_iter_stack_init(&sort_iter, b);
+
+	for (t = b->set + start_idx;
+	     t < b->set + end_idx;
+	     t++) {
+		u64s += le16_to_cpu(bset(b, t)->u64s);
+		sort_iter_add(&sort_iter.iter,
+			      btree_bkey_first(b, t),
+			      btree_bkey_last(b, t));
+	}
+
+	bytes = sorting_entire_node
+		? btree_bytes(c)
+		: __vstruct_bytes(struct btree_node, u64s);
+
+	out = btree_bounce_alloc(c, bytes, &used_mempool);
+
+	start_time = local_clock();
+
+	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
+
+	out->keys.u64s = cpu_to_le16(u64s);
+
+	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
+
+	if (sorting_entire_node)
+		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+				       start_time);
+
+	/* Make sure we preserve bset journal_seq: */
+	for (t = b->set + start_idx; t < b->set + end_idx; t++)
+		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
+	start_bset->journal_seq = cpu_to_le64(seq);
+
+	if (sorting_entire_node) {
+		u64s = le16_to_cpu(out->keys.u64s);
+
+		BUG_ON(bytes != btree_bytes(c));
+
+		/*
+		 * Our temporary buffer is the same size as the btree node's
+		 * buffer, we can just swap buffers instead of doing a big
+		 * memcpy()
+		 */
+		*out = *b->data;
+		out->keys.u64s = cpu_to_le16(u64s);
+		swap(out, b->data);
+		set_btree_bset(b, b->set, &b->data->keys);
+	} else {
+		start_bset->u64s = out->keys.u64s;
+		memcpy_u64s(start_bset->start,
+			    out->keys.start,
+			    le16_to_cpu(out->keys.u64s));
+	}
+
+	for (i = start_idx + 1; i < end_idx; i++)
+		b->nr.bset_u64s[start_idx] +=
+			b->nr.bset_u64s[i];
+
+	b->nsets -= shift;
+
+	for (i = start_idx + 1; i < b->nsets; i++) {
+		b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
+		b->set[i] = b->set[i + shift];
+	}
+
+	for (i = b->nsets; i < MAX_BSETS; i++)
+		b->nr.bset_u64s[i] = 0;
+
+	set_btree_bset_end(b, &b->set[start_idx]);
+	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
+
+	btree_bounce_free(c, bytes, used_mempool, out);
+
+	bch2_verify_btree_nr_keys(b);
+}
+
+void bch2_btree_sort_into(struct bch_fs *c,
+			  struct btree *dst,
+			  struct btree *src)
+{
+	struct btree_nr_keys nr;
+	struct btree_node_iter src_iter;
+	u64 start_time = local_clock();
+
+	BUG_ON(dst->nsets != 1);
+
+	bch2_bset_set_no_aux_tree(dst, dst->set);
+
+	bch2_btree_node_iter_init_from_start(&src_iter, src);
+
+	nr = bch2_sort_repack(btree_bset_first(dst),
+			      src, &src_iter,
+			      &dst->format,
+			      true);
+
+	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+			       start_time);
+
+	set_btree_bset_end(dst, dst->set);
+
+	dst->nr.live_u64s += nr.live_u64s;
+	dst->nr.bset_u64s[0] += nr.bset_u64s[0];
+	dst->nr.packed_keys += nr.packed_keys;
+	dst->nr.unpacked_keys += nr.unpacked_keys;
+
+	bch2_verify_btree_nr_keys(dst);
+}
+
+/*
+ * We're about to add another bset to the btree node, so if there's currently
+ * too many bsets - sort some of them together:
+ */
+static bool btree_node_compact(struct bch_fs *c, struct btree *b)
+{
+	unsigned unwritten_idx;
+	bool ret = false;
+
+	for (unwritten_idx = 0;
+	     unwritten_idx < b->nsets;
+	     unwritten_idx++)
+		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
+			break;
+
+	if (b->nsets - unwritten_idx > 1) {
+		btree_node_sort(c, b, unwritten_idx,
+				b->nsets, false);
+		ret = true;
+	}
+
+	if (unwritten_idx > 1) {
+		btree_node_sort(c, b, 0, unwritten_idx, false);
+		ret = true;
+	}
+
+	return ret;
+}
+
+void bch2_btree_build_aux_trees(struct btree *b)
+{
+	struct bset_tree *t;
+
+	for_each_bset(b, t)
+		bch2_bset_build_aux_tree(b, t,
+				!bset_written(b, bset(b, t)) &&
+				t == bset_tree_last(b));
+}
+
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+	unsigned mid_u64s_bits =
+		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
+
+/*
+ * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
+ * inserted into
+ *
+ * Safe to call if there already is an unwritten bset - will only add a new bset
+ * if @b doesn't already have one.
+ *
+ * Returns true if we sorted (i.e. invalidated iterators)
+ */
+void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_node_entry *bne;
+	bool reinit_iter = false;
+
+	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
+	BUG_ON(bset_written(b, bset(b, &b->set[1])));
+	BUG_ON(btree_node_just_written(b));
+
+	if (b->nsets == MAX_BSETS &&
+	    !btree_node_write_in_flight(b) &&
+	    should_compact_all(c, b)) {
+		bch2_btree_node_write(c, b, SIX_LOCK_write,
+				      BTREE_WRITE_init_next_bset);
+		reinit_iter = true;
+	}
+
+	if (b->nsets == MAX_BSETS &&
+	    btree_node_compact(c, b))
+		reinit_iter = true;
+
+	BUG_ON(b->nsets >= MAX_BSETS);
+
+	bne = want_new_bset(c, b);
+	if (bne)
+		bch2_bset_init_next(c, b, bne);
+
+	bch2_btree_build_aux_trees(b);
+
+	if (reinit_iter)
+		bch2_trans_node_reinit_iter(trans, b);
+}
+
+static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
+			  struct bch_dev *ca,
+			  struct btree *b, struct bset *i,
+			  unsigned offset, int write)
+{
+	prt_printf(out, bch2_log_msg(c, "%s"),
+		   write == READ
+		   ? "error validating btree node "
+		   : "corrupt btree node before write ");
+	if (ca)
+		prt_printf(out, "on %s ", ca->name);
+	prt_printf(out, "at btree ");
+	bch2_btree_pos_to_text(out, c, b);
+
+	prt_printf(out, "\n node offset %u", b->written);
+	if (i)
+		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+	prt_str(out, ": ");
+}
+
+__printf(9, 10)
+static int __btree_err(int ret,
+		       struct bch_fs *c,
+		       struct bch_dev *ca,
+		       struct btree *b,
+		       struct bset *i,
+		       int write,
+		       bool have_retry,
+		       enum bch_sb_error_id err_type,
+		       const char *fmt, ...)
+{
+	struct printbuf out = PRINTBUF;
+	va_list args;
+
+	btree_err_msg(&out, c, ca, b, i, b->written, write);
+
+	va_start(args, fmt);
+	prt_vprintf(&out, fmt, args);
+	va_end(args);
+
+	if (write == WRITE) {
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		ret = c->opts.errors == BCH_ON_ERROR_continue
+			? 0
+			: -BCH_ERR_fsck_errors_not_fixed;
+		goto out;
+	}
+
+	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
+		ret = -BCH_ERR_btree_node_read_err_fixable;
+	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
+		ret = -BCH_ERR_btree_node_read_err_bad_node;
+
+	if (ret != -BCH_ERR_btree_node_read_err_fixable)
+		bch2_sb_error_count(c, err_type);
+
+	switch (ret) {
+	case -BCH_ERR_btree_node_read_err_fixable:
+		ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
+		if (ret != -BCH_ERR_fsck_fix &&
+		    ret != -BCH_ERR_fsck_ignore)
+			goto fsck_err;
+		ret = -BCH_ERR_fsck_fix;
+		break;
+	case -BCH_ERR_btree_node_read_err_want_retry:
+	case -BCH_ERR_btree_node_read_err_must_retry:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		break;
+	case -BCH_ERR_btree_node_read_err_bad_node:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		bch2_topology_error(c);
+		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: -EIO;
+		break;
+	case -BCH_ERR_btree_node_read_err_incompatible:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		ret = -BCH_ERR_fsck_errors_not_fixed;
+		break;
+	default:
+		BUG();
+	}
+out:
+fsck_err:
+	printbuf_exit(&out);
+	return ret;
+}
+
+#define btree_err(type, c, ca, b, i, _err_type, msg, ...)		\
+({									\
+	int _ret = __btree_err(type, c, ca, b, i, write, have_retry,	\
+			       BCH_FSCK_ERR_##_err_type,		\
+			       msg, ##__VA_ARGS__);			\
+									\
+	if (_ret != -BCH_ERR_fsck_fix) {				\
+		ret = _ret;						\
+		goto fsck_err;						\
+	}								\
+									\
+	*saw_error = true;						\
+})
+
+#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
+
+/*
+ * When btree topology repair changes the start or end of a node, that might
+ * mean we have to drop keys that are no longer inside the node:
+ */
+__cold
+void bch2_btree_node_drop_keys_outside_node(struct btree *b)
+{
+	struct bset_tree *t;
+
+	for_each_bset(b, t) {
+		struct bset *i = bset(b, t);
+		struct bkey_packed *k;
+
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
+				break;
+
+		if (k != i->start) {
+			unsigned shift = (u64 *) k - (u64 *) i->start;
+
+			memmove_u64s_down(i->start, k,
+					  (u64 *) vstruct_end(i) - (u64 *) k);
+			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
+			set_btree_bset_end(b, t);
+		}
+
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
+				break;
+
+		if (k != vstruct_last(i)) {
+			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
+			set_btree_bset_end(b, t);
+		}
+	}
+
+	/*
+	 * Always rebuild search trees: eytzinger search tree nodes directly
+	 * depend on the values of min/max key:
+	 */
+	bch2_bset_set_no_aux_tree(b, b->set);
+	bch2_btree_build_aux_trees(b);
+
+	struct bkey_s_c k;
+	struct bkey unpacked;
+	struct btree_node_iter iter;
+	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
+	}
+}
+
+static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
+			 struct btree *b, struct bset *i,
+			 unsigned offset, unsigned sectors,
+			 int write, bool have_retry, bool *saw_error)
+{
+	unsigned version = le16_to_cpu(i->version);
+	struct printbuf buf1 = PRINTBUF;
+	struct printbuf buf2 = PRINTBUF;
+	int ret = 0;
+
+	btree_err_on(!bch2_version_compatible(version),
+		     -BCH_ERR_btree_node_read_err_incompatible,
+		     c, ca, b, i,
+		     btree_node_unsupported_version,
+		     "unsupported bset version %u.%u",
+		     BCH_VERSION_MAJOR(version),
+		     BCH_VERSION_MINOR(version));
+
+	if (btree_err_on(version < c->sb.version_min,
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, NULL, b, i,
+			 btree_node_bset_older_than_sb_min,
+			 "bset version %u older than superblock version_min %u",
+			 version, c->sb.version_min)) {
+		mutex_lock(&c->sb_lock);
+		c->disk_sb.sb->version_min = cpu_to_le16(version);
+		bch2_write_super(c);
+		mutex_unlock(&c->sb_lock);
+	}
+
+	if (btree_err_on(BCH_VERSION_MAJOR(version) >
+			 BCH_VERSION_MAJOR(c->sb.version),
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, NULL, b, i,
+			 btree_node_bset_newer_than_sb,
+			 "bset version %u newer than superblock version %u",
+			 version, c->sb.version)) {
+		mutex_lock(&c->sb_lock);
+		c->disk_sb.sb->version = cpu_to_le16(version);
+		bch2_write_super(c);
+		mutex_unlock(&c->sb_lock);
+	}
+
+	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
+		     -BCH_ERR_btree_node_read_err_incompatible,
+		     c, ca, b, i,
+		     btree_node_unsupported_version,
+		     "BSET_SEPARATE_WHITEOUTS no longer supported");
+
+	if (btree_err_on(offset + sectors > btree_sectors(c),
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, ca, b, i,
+			 bset_past_end_of_btree_node,
+			 "bset past end of btree node")) {
+		i->u64s = 0;
+		ret = 0;
+		goto out;
+	}
+
+	btree_err_on(offset && !i->u64s,
+		     -BCH_ERR_btree_node_read_err_fixable,
+		     c, ca, b, i,
+		     bset_empty,
+		     "empty bset");
+
+	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
+		     -BCH_ERR_btree_node_read_err_want_retry,
+		     c, ca, b, i,
+		     bset_wrong_sector_offset,
+		     "bset at wrong sector offset");
+
+	if (!offset) {
+		struct btree_node *bn =
+			container_of(i, struct btree_node, keys);
+		/* These indicate that we read the wrong btree node: */
+
+		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+			struct bch_btree_ptr_v2 *bp =
+				&bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+			/* XXX endianness */
+			btree_err_on(bp->seq != bn->keys.seq,
+				     -BCH_ERR_btree_node_read_err_must_retry,
+				     c, ca, b, NULL,
+				     bset_bad_seq,
+				     "incorrect sequence number (wrong btree node)");
+		}
+
+		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, i,
+			     btree_node_bad_btree,
+			     "incorrect btree id");
+
+		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, i,
+			     btree_node_bad_level,
+			     "incorrect level");
+
+		if (!write)
+			compat_btree_node(b->c.level, b->c.btree_id, version,
+					  BSET_BIG_ENDIAN(i), write, bn);
+
+		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+			struct bch_btree_ptr_v2 *bp =
+				&bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+			if (BTREE_PTR_RANGE_UPDATED(bp)) {
+				b->data->min_key = bp->min_key;
+				b->data->max_key = b->key.k.p;
+			}
+
+			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
+				     -BCH_ERR_btree_node_read_err_must_retry,
+				     c, ca, b, NULL,
+				     btree_node_bad_min_key,
+				     "incorrect min_key: got %s should be %s",
+				     (printbuf_reset(&buf1),
+				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
+				     (printbuf_reset(&buf2),
+				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
+		}
+
+		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, i,
+			     btree_node_bad_max_key,
+			     "incorrect max key %s",
+			     (printbuf_reset(&buf1),
+			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
+
+		if (write)
+			compat_btree_node(b->c.level, b->c.btree_id, version,
+					  BSET_BIG_ENDIAN(i), write, bn);
+
+		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
+			     -BCH_ERR_btree_node_read_err_bad_node,
+			     c, ca, b, i,
+			     btree_node_bad_format,
+			     "invalid bkey format: %s\n %s", buf1.buf,
+			     (printbuf_reset(&buf2),
+			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
+		printbuf_reset(&buf1);
+
+		compat_bformat(b->c.level, b->c.btree_id, version,
+			       BSET_BIG_ENDIAN(i), write,
+			       &bn->format);
+	}
+out:
+fsck_err:
+	printbuf_exit(&buf2);
+	printbuf_exit(&buf1);
+	return ret;
+}
+
+static int bset_key_invalid(struct bch_fs *c, struct btree *b,
+			    struct bkey_s_c k,
+			    bool updated_range, int rw,
+			    struct printbuf *err)
+{
+	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
+		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
+		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
+}
+
+static int validate_bset_keys(struct bch_fs *c, struct btree *b,
+			      struct bset *i, int write,
+			      bool have_retry, bool *saw_error)
+{
+	unsigned version = le16_to_cpu(i->version);
+	struct bkey_packed *k, *prev = NULL;
+	struct printbuf buf = PRINTBUF;
+	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
+	int ret = 0;
+
+	for (k = i->start;
+	     k != vstruct_last(i);) {
+		struct bkey_s u;
+		struct bkey tmp;
+
+		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, i,
+				 btree_node_bkey_past_bset_end,
+				 "key extends past end of bset")) {
+			i->u64s = cpu_to_le16((u64 *) k - i->_data);
+			break;
+		}
+
+		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, i,
+				 btree_node_bkey_bad_format,
+				 "invalid bkey format %u", k->format)) {
+			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+			memmove_u64s_down(k, bkey_p_next(k),
+					  (u64 *) vstruct_end(i) - (u64 *) k);
+			continue;
+		}
+
+		/* XXX: validate k->u64s */
+		if (!write)
+			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+					 BSET_BIG_ENDIAN(i), write,
+					 &b->format, k);
+
+		u = __bkey_disassemble(b, k, &tmp);
+
+		printbuf_reset(&buf);
+		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
+			printbuf_reset(&buf);
+			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
+			prt_printf(&buf, "\n ");
+			bch2_bkey_val_to_text(&buf, c, u.s_c);
+
+			btree_err(-BCH_ERR_btree_node_read_err_fixable,
+				  c, NULL, b, i,
+				  btree_node_bad_bkey,
+				  "invalid bkey: %s", buf.buf);
+
+			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+			memmove_u64s_down(k, bkey_p_next(k),
+					  (u64 *) vstruct_end(i) - (u64 *) k);
+			continue;
+		}
+
+		if (write)
+			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+					 BSET_BIG_ENDIAN(i), write,
+					 &b->format, k);
+
+		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
+			struct bkey up = bkey_unpack_key(b, prev);
+
+			printbuf_reset(&buf);
+			prt_printf(&buf, "keys out of order: ");
+			bch2_bkey_to_text(&buf, &up);
+			prt_printf(&buf, " > ");
+			bch2_bkey_to_text(&buf, u.k);
+
+			bch2_dump_bset(c, b, i, 0);
+
+			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
+				      c, NULL, b, i,
+				      btree_node_bkey_out_of_order,
+				      "%s", buf.buf)) {
+				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+				memmove_u64s_down(k, bkey_p_next(k),
+						  (u64 *) vstruct_end(i) - (u64 *) k);
+				continue;
+			}
+		}
+
+		prev = k;
+		k = bkey_p_next(k);
+	}
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
+
+int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
+			      struct btree *b, bool have_retry, bool *saw_error)
+{
+	struct btree_node_entry *bne;
+	struct sort_iter *iter;
+	struct btree_node *sorted;
+	struct bkey_packed *k;
+	struct bch_extent_ptr *ptr;
+	struct bset *i;
+	bool used_mempool, blacklisted;
+	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
+	unsigned u64s;
+	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
+	struct printbuf buf = PRINTBUF;
+	int ret = 0, retry_read = 0, write = READ;
+
+	b->version_ondisk = U16_MAX;
+	/* We might get called multiple times on read retry: */
+	b->written = 0;
+
+	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
+	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
+
+	if (bch2_meta_read_fault("btree"))
+		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
+			  c, ca, b, NULL,
+			  btree_node_fault_injected,
+			  "dynamic fault");
+
+	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
+		     -BCH_ERR_btree_node_read_err_must_retry,
+		     c, ca, b, NULL,
+		     btree_node_bad_magic,
+		     "bad magic: want %llx, got %llx",
+		     bset_magic(c), le64_to_cpu(b->data->magic));
+
+	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+		struct bch_btree_ptr_v2 *bp =
+			&bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+		btree_err_on(b->data->keys.seq != bp->seq,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     btree_node_bad_seq,
+			     "got wrong btree node (seq %llx want %llx)",
+			     b->data->keys.seq, bp->seq);
+	} else {
+		btree_err_on(!b->data->keys.seq,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     btree_node_bad_seq,
+			     "bad btree header: seq 0");
+	}
+
+	while (b->written < (ptr_written ?: btree_sectors(c))) {
+		unsigned sectors;
+		struct nonce nonce;
+		bool first = !b->written;
+		bool csum_bad;
+
+		if (!b->written) {
+			i = &b->data->keys;
+
+			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_unknown_csum,
+				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
+
+			nonce = btree_nonce(i, b->written << 9);
+
+			csum_bad = bch2_crc_cmp(b->data->csum,
+				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data));
+			if (csum_bad)
+				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+			btree_err_on(csum_bad,
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_bad_csum,
+				     "invalid checksum");
+
+			ret = bset_encrypt(c, i, b->written << 9);
+			if (bch2_fs_fatal_err_on(ret, c,
+					"error decrypting btree node: %i", ret))
+				goto fsck_err;
+
+			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
+				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
+				     -BCH_ERR_btree_node_read_err_incompatible,
+				     c, NULL, b, NULL,
+				     btree_node_unsupported_version,
+				     "btree node does not have NEW_EXTENT_OVERWRITE set");
+
+			sectors = vstruct_sectors(b->data, c->block_bits);
+		} else {
+			bne = write_block(b);
+			i = &bne->keys;
+
+			if (i->seq != b->data->keys.seq)
+				break;
+
+			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_unknown_csum,
+				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
+
+			nonce = btree_nonce(i, b->written << 9);
+			csum_bad = bch2_crc_cmp(bne->csum,
+				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne));
+			if (csum_bad)
+				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+			btree_err_on(csum_bad,
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_bad_csum,
+				     "invalid checksum");
+
+			ret = bset_encrypt(c, i, b->written << 9);
+			if (bch2_fs_fatal_err_on(ret, c,
+					"error decrypting btree node: %i\n", ret))
+				goto fsck_err;
+
+			sectors = vstruct_sectors(bne, c->block_bits);
+		}
+
+		b->version_ondisk = min(b->version_ondisk,
+					le16_to_cpu(i->version));
+
+		ret = validate_bset(c, ca, b, i, b->written, sectors,
+				    READ, have_retry, saw_error);
+		if (ret)
+			goto fsck_err;
+
+		if (!b->written)
+			btree_node_set_format(b, b->data->format);
+
+		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
+		if (ret)
+			goto fsck_err;
+
+		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
+
+		blacklisted = bch2_journal_seq_is_blacklisted(c,
+					le64_to_cpu(i->journal_seq),
+					true);
+
+		btree_err_on(blacklisted && first,
+			     -BCH_ERR_btree_node_read_err_fixable,
+			     c, ca, b, i,
+			     bset_blacklisted_journal_seq,
+			     "first btree node bset has blacklisted journal seq (%llu)",
+			     le64_to_cpu(i->journal_seq));
+
+		btree_err_on(blacklisted && ptr_written,
+			     -BCH_ERR_btree_node_read_err_fixable,
+			     c, ca, b, i,
+			     first_bset_blacklisted_journal_seq,
+			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
+			     le64_to_cpu(i->journal_seq),
+			     b->written, b->written + sectors, ptr_written);
+
+		b->written += sectors;
+
+		if (blacklisted && !first)
+			continue;
+
+		sort_iter_add(iter,
+			      vstruct_idx(i, 0),
+			      vstruct_last(i));
+	}
+
+	if (ptr_written) {
+		btree_err_on(b->written < ptr_written,
+			     -BCH_ERR_btree_node_read_err_want_retry,
+			     c, ca, b, NULL,
+			     btree_node_data_missing,
+			     "btree node data missing: expected %u sectors, found %u",
+			     ptr_written, b->written);
+	} else {
+		for (bne = write_block(b);
+		     bset_byte_offset(b, bne) < btree_bytes(c);
+		     bne = (void *) bne + block_bytes(c))
+			btree_err_on(bne->keys.seq == b->data->keys.seq &&
+				     !bch2_journal_seq_is_blacklisted(c,
+							le64_to_cpu(bne->keys.journal_seq),
+							true),
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, NULL,
+				     btree_node_bset_after_end,
+				     "found bset signature after last bset");
+	}
+
+	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
+	sorted->keys.u64s = 0;
+
+	set_btree_bset(b, b->set, &b->data->keys);
+
+	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
+
+	u64s = le16_to_cpu(sorted->keys.u64s);
+	*sorted = *b->data;
+	sorted->keys.u64s = cpu_to_le16(u64s);
+	swap(sorted, b->data);
+	set_btree_bset(b, b->set, &b->data->keys);
+	b->nsets = 1;
+
+	BUG_ON(b->nr.live_u64s != u64s);
+
+	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+
+	if (updated_range)
+		bch2_btree_node_drop_keys_outside_node(b);
+
+	i = &b->data->keys;
+	for (k = i->start; k != vstruct_last(i);) {
+		struct bkey tmp;
+		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
+
+		printbuf_reset(&buf);
+
+		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
+		    (bch2_inject_i