Diffstat (limited to 'fs/bcachefs/btree_update_interior.c')
-rw-r--r-- | fs/bcachefs/btree_update_interior.c | 2444
1 file changed, 2444 insertions, 0 deletions
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
new file mode 100644
index 000000000000..76f27bc9fa24
--- /dev/null
+++ b/fs/bcachefs/btree_update_interior.c
@@ -0,0 +1,2444 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_methods.h"
+#include "btree_cache.h"
+#include "btree_gc.h"
+#include "btree_journal_iter.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "buckets.h"
+#include "clock.h"
+#include "error.h"
+#include "extents.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+#include "keylist.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/random.h>
+
+static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
+				  struct btree_path *, struct btree *,
+				  struct keylist *, unsigned);
+static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
+
+static struct btree_path *get_unlocked_mut_path(struct btree_trans *trans,
+						enum btree_id btree_id,
+						unsigned level,
+						struct bpos pos)
+{
+	struct btree_path *path;
+
+	path = bch2_path_get(trans, btree_id, pos, level + 1, level,
+			     BTREE_ITER_NOPRESERVE|
+			     BTREE_ITER_INTENT, _RET_IP_);
+	path = bch2_btree_path_make_mut(trans, path, true, _RET_IP_);
+	bch2_btree_path_downgrade(trans, path);
+	__bch2_btree_path_unlock(trans, path);
+	return path;
+}
+
+/* Debug code: */
+
+/*
+ * Verify that child nodes correctly span parent node's range:
+ */
+static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+	struct bpos next_node = b->data->min_key;
+	struct btree_node_iter iter;
+	struct bkey_s_c k;
+	struct bkey_s_c_btree_ptr_v2 bp;
+	struct bkey unpacked;
+	struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+	BUG_ON(!b->c.level);
+
+	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+		return;
+
+	bch2_btree_node_iter_init_from_start(&iter, b);
+
+	while (1) {
+		k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
+		if (k.k->type != KEY_TYPE_btree_ptr_v2)
+			break;
+		bp = bkey_s_c_to_btree_ptr_v2(k);
+
+		if (!bpos_eq(next_node, bp.v->min_key)) {
+			bch2_dump_btree_node(c, b);
+			bch2_bpos_to_text(&buf1, next_node);
+			bch2_bpos_to_text(&buf2, bp.v->min_key);
+			panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
+		}
+
+		bch2_btree_node_iter_advance(&iter, b);
+
+		if (bch2_btree_node_iter_end(&iter)) {
+			if (!bpos_eq(k.k->p, b->key.k.p)) {
+				bch2_dump_btree_node(c, b);
+				bch2_bpos_to_text(&buf1, b->key.k.p);
+				bch2_bpos_to_text(&buf2, k.k->p);
+				panic("expected end %s got %s\n", buf1.buf, buf2.buf);
+			}
+			break;
+		}
+
+		next_node = bpos_successor(k.k->p);
+	}
+#endif
+}
+
+/* Calculate ideal packed bkey format for new btree nodes: */
+
+void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
+{
+	struct bkey_packed *k;
+	struct bset_tree *t;
+	struct bkey uk;
+
+	for_each_bset(b, t)
+		bset_tree_for_each_key(b, t, k)
+			if (!bkey_deleted(k)) {
+				uk = bkey_unpack_key(b, k);
+				bch2_bkey_format_add_key(s, &uk);
+			}
+}
+
+static struct bkey_format bch2_btree_calc_format(struct btree *b)
+{
+	struct bkey_format_state s;
+
+	bch2_bkey_format_init(&s);
+	bch2_bkey_format_add_pos(&s, b->data->min_key);
+	bch2_bkey_format_add_pos(&s, b->data->max_key);
+	__bch2_btree_calc_format(&s, b);
+
+	return bch2_bkey_format_done(&s);
+}
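
The format calculation above is, at heart, a range computation: track the minimum and maximum of each key field across the node (plus the node's min/max positions), and each key can then be stored as a small offset from the field minimum. A minimal standalone sketch of that sizing idea for a single field follows; names and types are hypothetical, not the bcachefs API:

	/* sketch: minimal bits needed to pack one field of every key */
	#include <stdint.h>
	#include <stdio.h>

	struct format_state {
		uint64_t field_min, field_max;
	};

	static void format_add(struct format_state *s, uint64_t v)
	{
		if (v < s->field_min) s->field_min = v;
		if (v > s->field_max) s->field_max = v;
	}

	/* bits needed to store (v - field_min) for every key in the node: */
	static unsigned format_bits(const struct format_state *s)
	{
		uint64_t range = s->field_max - s->field_min;
		unsigned bits = 0;

		while (range) {
			bits++;
			range >>= 1;
		}
		return bits;
	}

	int main(void)
	{
		struct format_state s = { .field_min = UINT64_MAX, .field_max = 0 };
		uint64_t keys[] = { 1000, 1017, 1999 };

		for (unsigned i = 0; i < 3; i++)
			format_add(&s, keys[i]);

		/* 1999 - 1000 = 999, so 10 bits instead of 64: */
		printf("offset=%llu bits=%u\n",
		       (unsigned long long) s.field_min, format_bits(&s));
		return 0;
	}
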
+
+static size_t btree_node_u64s_with_format(struct btree *b,
+					  struct bkey_format *new_f)
+{
+	struct bkey_format *old_f = &b->format;
+
+	/* stupid integer promotion rules */
+	ssize_t delta =
+		(((int) new_f->key_u64s - old_f->key_u64s) *
+		 (int) b->nr.packed_keys) +
+		(((int) new_f->key_u64s - BKEY_U64s) *
+		 (int) b->nr.unpacked_keys);
+
+	BUG_ON(delta + b->nr.live_u64s < 0);
+
+	return b->nr.live_u64s + delta;
+}
+
+/**
+ * bch2_btree_node_format_fits - check if we could rewrite node with a new format
+ *
+ * @c:		filesystem handle
+ * @b:		btree node to rewrite
+ * @new_f:	bkey format to translate keys to
+ *
+ * Returns: true if all re-packed keys will be able to fit in a new node.
+ *
+ * Assumes all keys will successfully pack with the new format.
+ */
+bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+				 struct bkey_format *new_f)
+{
+	size_t u64s = btree_node_u64s_with_format(b, new_f);
+
+	return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
+}
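
The cast dance in btree_node_u64s_with_format() is worth spelling out: the key counts and per-key sizes are unsigned, and a format change can shrink keys, so the subtraction has to happen in a signed type or it wraps. A standalone sketch of the same arithmetic, with made-up sizes:

	/* sketch: why the deltas above are computed in a signed type */
	#include <stdio.h>

	int main(void)
	{
		unsigned old_key_u64s = 3, new_key_u64s = 2;
		unsigned packed_keys = 100, live_u64s = 300;

		/* wrong: unsigned subtraction wraps to a huge number: */
		unsigned bad = (new_key_u64s - old_key_u64s) * packed_keys;

		/* right: do the subtraction as signed, then apply the delta: */
		long delta = ((int) new_key_u64s - (int) old_key_u64s) *
			     (int) packed_keys;
		long good = (long) live_u64s + delta;

		printf("bad=%u good=%ld\n", bad, good);
		return 0;
	}
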
+
+/* Btree node freeing/allocation: */
+
+static void __btree_node_free(struct bch_fs *c, struct btree *b)
+{
+	trace_and_count(c, btree_node_free, c, b);
+
+	BUG_ON(btree_node_write_blocked(b));
+	BUG_ON(btree_node_dirty(b));
+	BUG_ON(btree_node_need_write(b));
+	BUG_ON(b == btree_node_root(c, b));
+	BUG_ON(b->ob.nr);
+	BUG_ON(!list_empty(&b->write_blocked));
+	BUG_ON(b->will_make_reachable);
+
+	clear_btree_node_noevict(b);
+
+	mutex_lock(&c->btree_cache.lock);
+	list_move(&b->list, &c->btree_cache.freeable);
+	mutex_unlock(&c->btree_cache.lock);
+}
+
+static void bch2_btree_node_free_inmem(struct btree_trans *trans,
+				       struct btree_path *path,
+				       struct btree *b)
+{
+	struct bch_fs *c = trans->c;
+	unsigned level = b->c.level;
+
+	bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+	bch2_btree_node_hash_remove(&c->btree_cache, b);
+	__btree_node_free(c, b);
+	six_unlock_write(&b->c.lock);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
+
+	trans_for_each_path(trans, path)
+		if (path->l[level].b == b) {
+			btree_node_unlock(trans, path, level);
+			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+		}
+}
+
+static void bch2_btree_node_free_never_used(struct btree_update *as,
+					    struct btree_trans *trans,
+					    struct btree *b)
+{
+	struct bch_fs *c = as->c;
+	struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
+	struct btree_path *path;
+	unsigned level = b->c.level;
+
+	BUG_ON(!list_empty(&b->write_blocked));
+	BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
+
+	b->will_make_reachable = 0;
+	closure_put(&as->cl);
+
+	clear_btree_node_will_make_reachable(b);
+	clear_btree_node_accessed(b);
+	clear_btree_node_dirty_acct(c, b);
+	clear_btree_node_need_write(b);
+
+	mutex_lock(&c->btree_cache.lock);
+	list_del_init(&b->list);
+	bch2_btree_node_hash_remove(&c->btree_cache, b);
+	mutex_unlock(&c->btree_cache.lock);
+
+	BUG_ON(p->nr >= ARRAY_SIZE(p->b));
+	p->b[p->nr++] = b;
+
+	six_unlock_intent(&b->c.lock);
+
+	trans_for_each_path(trans, path)
+		if (path->l[level].b == b) {
+			btree_node_unlock(trans, path, level);
+			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+		}
+}
+
+static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
+					     struct disk_reservation *res,
+					     struct closure *cl,
+					     bool interior_node,
+					     unsigned flags)
+{
+	struct bch_fs *c = trans->c;
+	struct write_point *wp;
+	struct btree *b;
+	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+	struct open_buckets obs = { .nr = 0 };
+	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
+	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+	unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
+		? BTREE_NODE_RESERVE
+		: 0;
+	int ret;
+
+	mutex_lock(&c->btree_reserve_cache_lock);
+	if (c->btree_reserve_cache_nr > nr_reserve) {
+		struct btree_alloc *a =
+			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+		obs = a->ob;
+		bkey_copy(&tmp.k, &a->k);
+		mutex_unlock(&c->btree_reserve_cache_lock);
+		goto mem_alloc;
+	}
+	mutex_unlock(&c->btree_reserve_cache_lock);
+
+retry:
+	ret = bch2_alloc_sectors_start_trans(trans,
+					     c->opts.metadata_target ?:
+					     c->opts.foreground_target,
+					     0,
+					     writepoint_ptr(&c->btree_write_point),
+					     &devs_have,
+					     res->nr_replicas,
+					     c->opts.metadata_replicas_required,
+					     watermark, 0, cl, &wp);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	if (wp->sectors_free < btree_sectors(c)) {
+		struct open_bucket *ob;
+		unsigned i;
+
+		open_bucket_for_each(c, &wp->ptrs, ob, i)
+			if (ob->sectors_free < btree_sectors(c))
+				ob->sectors_free = 0;
+
+		bch2_alloc_sectors_done(c, wp);
+		goto retry;
+	}
+
+	bkey_btree_ptr_v2_init(&tmp.k);
+	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
+
+	bch2_open_bucket_get(c, wp, &obs);
+	bch2_alloc_sectors_done(c, wp);
+mem_alloc:
+	b = bch2_btree_node_mem_alloc(trans, interior_node);
+	six_unlock_write(&b->c.lock);
+	six_unlock_intent(&b->c.lock);
+
+	/* we hold cannibalize_lock: */
+	BUG_ON(IS_ERR(b));
+	BUG_ON(b->ob.nr);
+
+	bkey_copy(&b->key, &tmp.k);
+	b->ob = obs;
+
+	return b;
+}
+
+static struct btree *bch2_btree_node_alloc(struct btree_update *as,
+					   struct btree_trans *trans,
+					   unsigned level)
+{
+	struct bch_fs *c = as->c;
+	struct btree *b;
+	struct prealloc_nodes *p = &as->prealloc_nodes[!!level];
+	int ret;
+
+	BUG_ON(level >= BTREE_MAX_DEPTH);
+	BUG_ON(!p->nr);
+
+	b = p->b[--p->nr];
+
+	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+
+	set_btree_node_accessed(b);
+	set_btree_node_dirty_acct(c, b);
+	set_btree_node_need_write(b);
+
+	bch2_bset_init_first(b, &b->data->keys);
+	b->c.level = level;
+	b->c.btree_id = as->btree_id;
+	b->version_ondisk = c->sb.version;
+
+	memset(&b->nr, 0, sizeof(b->nr));
+	b->data->magic = cpu_to_le64(bset_magic(c));
+	memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
+	b->data->flags = 0;
+	SET_BTREE_NODE_ID(b->data, as->btree_id);
+	SET_BTREE_NODE_LEVEL(b->data, level);
+
+	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);
+
+		bp->v.mem_ptr = 0;
+		bp->v.seq = b->data->keys.seq;
+		bp->v.sectors_written = 0;
+	}
+
+	SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);
+
+	bch2_btree_build_aux_trees(b);
+
+	ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
+	BUG_ON(ret);
+
+	trace_and_count(c, btree_node_alloc, c, b);
+	bch2_increment_clock(c, btree_sectors(c), WRITE);
+	return b;
+}
+
+static void btree_set_min(struct btree *b, struct bpos pos)
+{
+	if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
+		bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
+	b->data->min_key = pos;
+}
+
+static void btree_set_max(struct btree *b, struct bpos pos)
+{
+	b->key.k.p = pos;
+	b->data->max_key = pos;
+}
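
__bch2_btree_node_alloc() above follows a common fast-path/slow-path shape: pop a cached allocation from a small bounded reserve under a lock, and only fall back to the real allocator when the reserve is empty. A minimal sketch of that shape, with a pthread mutex standing in for the kernel mutex and all names hypothetical:

	/* sketch: bounded reserve cache with a slow-path fallback */
	#include <pthread.h>
	#include <stdio.h>

	#define RESERVE_MAX 8

	static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
	static int reserve[RESERVE_MAX];
	static unsigned reserve_nr;

	static int alloc_slow_path(void)
	{
		return 42;	/* stand-in for allocating fresh disk space */
	}

	static int node_alloc(void)
	{
		pthread_mutex_lock(&reserve_lock);
		if (reserve_nr) {
			int v = reserve[--reserve_nr];
			pthread_mutex_unlock(&reserve_lock);
			return v;
		}
		pthread_mutex_unlock(&reserve_lock);

		return alloc_slow_path();
	}

	int main(void)
	{
		printf("%d\n", node_alloc());
		return 0;
	}
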
+
+static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
+						       struct btree_trans *trans,
+						       struct btree *b)
+{
+	struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
+	struct bkey_format format = bch2_btree_calc_format(b);
+
+	/*
+	 * The keys might expand with the new format - if they wouldn't fit in
+	 * the btree node anymore, use the old format for now:
+	 */
+	if (!bch2_btree_node_format_fits(as->c, b, &format))
+		format = b->format;
+
+	SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
+
+	btree_set_min(n, b->data->min_key);
+	btree_set_max(n, b->data->max_key);
+
+	n->data->format = format;
+	btree_node_set_format(n, format);
+
+	bch2_btree_sort_into(as->c, n, b);
+
+	btree_node_reset_sib_u64s(n);
+	return n;
+}
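
The fallback in bch2_btree_node_alloc_replacement() is a guarded optimization: adopt the recalculated format only if the repacked keys provably still fit, otherwise keep the old one. A standalone sketch of that decision, with made-up sizes:

	/* sketch: only switch formats when the repacked keys still fit */
	#include <stdbool.h>
	#include <stdio.h>

	struct fmt { unsigned key_u64s; };

	static bool format_fits(unsigned nkeys, struct fmt f, unsigned node_u64s)
	{
		return nkeys * f.key_u64s < node_u64s;
	}

	int main(void)
	{
		struct fmt old = { .key_u64s = 2 }, new = { .key_u64s = 3 };
		unsigned nkeys = 100, node_u64s = 256;

		/* keys would expand past the node size, so keep the old format: */
		struct fmt chosen = format_fits(nkeys, new, node_u64s) ? new : old;

		printf("chosen key_u64s=%u\n", chosen.key_u64s);
		return 0;
	}
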
+
+static struct btree *__btree_root_alloc(struct btree_update *as,
+					struct btree_trans *trans, unsigned level)
+{
+	struct btree *b = bch2_btree_node_alloc(as, trans, level);
+
+	btree_set_min(b, POS_MIN);
+	btree_set_max(b, SPOS_MAX);
+	b->data->format = bch2_btree_calc_format(b);
+
+	btree_node_set_format(b, b->data->format);
+	bch2_btree_build_aux_trees(b);
+
+	return b;
+}
+
+static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans)
+{
+	struct bch_fs *c = as->c;
+	struct prealloc_nodes *p;
+
+	for (p = as->prealloc_nodes;
+	     p < as->prealloc_nodes + ARRAY_SIZE(as->prealloc_nodes);
+	     p++) {
+		while (p->nr) {
+			struct btree *b = p->b[--p->nr];
+
+			mutex_lock(&c->btree_reserve_cache_lock);
+
+			if (c->btree_reserve_cache_nr <
+			    ARRAY_SIZE(c->btree_reserve_cache)) {
+				struct btree_alloc *a =
+					&c->btree_reserve_cache[c->btree_reserve_cache_nr++];
+
+				a->ob = b->ob;
+				b->ob.nr = 0;
+				bkey_copy(&a->k, &b->key);
+			} else {
+				bch2_open_buckets_put(c, &b->ob);
+			}
+
+			mutex_unlock(&c->btree_reserve_cache_lock);
+
+			btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+			btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+			__btree_node_free(c, b);
+			six_unlock_write(&b->c.lock);
+			six_unlock_intent(&b->c.lock);
+		}
+	}
+}
+
+static int bch2_btree_reserve_get(struct btree_trans *trans,
+				  struct btree_update *as,
+				  unsigned nr_nodes[2],
+				  unsigned flags,
+				  struct closure *cl)
+{
+	struct bch_fs *c = as->c;
+	struct btree *b;
+	unsigned interior;
+	int ret = 0;
+
+	BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX);
+
+	/*
+	 * Protects reaping from the btree node cache and using the btree node
+	 * open bucket reserve:
+	 *
+	 * BTREE_INSERT_NOWAIT only applies to btree node allocation, not
+	 * blocking on this lock:
+	 */
+	ret = bch2_btree_cache_cannibalize_lock(c, cl);
+	if (ret)
+		return ret;
+
+	for (interior = 0; interior < 2; interior++) {
+		struct prealloc_nodes *p = as->prealloc_nodes + interior;
+
+		while (p->nr < nr_nodes[interior]) {
+			b = __bch2_btree_node_alloc(trans, &as->disk_res,
+						    flags & BTREE_INSERT_NOWAIT ? NULL : cl,
+						    interior, flags);
+			if (IS_ERR(b)) {
+				ret = PTR_ERR(b);
+				goto err;
+			}
+
+			p->b[p->nr++] = b;
+		}
+	}
+err:
+	bch2_btree_cache_cannibalize_unlock(c);
+	return ret;
+}
+
+/* Asynchronous interior node update machinery */
+
+static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans)
+{
+	struct bch_fs *c = as->c;
+
+	if (as->took_gc_lock)
+		up_read(&c->gc_lock);
+	as->took_gc_lock = false;
+
+	bch2_journal_pin_drop(&c->journal, &as->journal);
+	bch2_journal_pin_flush(&c->journal, &as->journal);
+	bch2_disk_reservation_put(c, &as->disk_res);
+	bch2_btree_reserve_put(as, trans);
+
+	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total],
+			       as->start_time);
+
+	mutex_lock(&c->btree_interior_update_lock);
+	list_del(&as->unwritten_list);
+	list_del(&as->list);
+
+	closure_debug_destroy(&as->cl);
+	mempool_free(as, &c->btree_interior_update_pool);
+
+	/*
+	 * Have to do the wakeup with btree_interior_update_lock still held,
+	 * since being on btree_interior_update_list is our ref on @c:
+	 */
+	closure_wake_up(&c->btree_interior_update_wait);
+
+	mutex_unlock(&c->btree_interior_update_lock);
+}
+
+static void btree_update_add_key(struct btree_update *as,
+				 struct keylist *keys, struct btree *b)
+{
+	struct bkey_i *k = &b->key;
+
+	BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s >
+	       ARRAY_SIZE(as->_old_keys));
+
+	bkey_copy(keys->top, k);
+	bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1;
+
+	bch2_keylist_push(keys);
+}
+
+/*
+ * The transactional part of an interior btree node update, where we journal the
+ * update we did to the interior node and update alloc info:
+ */
+static int btree_update_nodes_written_trans(struct btree_trans *trans,
+					    struct btree_update *as)
+{
+	struct bkey_i *k;
+	int ret;
+
+	ret = darray_make_room(&trans->extra_journal_entries, as->journal_u64s);
+	if (ret)
+		return ret;
+
+	memcpy(&darray_top(trans->extra_journal_entries),
+	       as->journal_entries,
+	       as->journal_u64s * sizeof(u64));
+	trans->extra_journal_entries.nr += as->journal_u64s;
+
+	trans->journal_pin = &as->journal;
+
+	for_each_keylist_key(&as->old_keys, k) {
+		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
+
+		ret = bch2_trans_mark_old(trans, as->btree_id, level, bkey_i_to_s_c(k), 0);
+		if (ret)
+			return ret;
+	}
+
+	for_each_keylist_key(&as->new_keys, k) {
+		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
+
+		ret = bch2_trans_mark_new(trans, as->btree_id, level, k, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
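
btree_update_add_key() and btree_update_nodes_written_trans() cooperate through the mem_ptr trick: while a btree-pointer key sits on an in-memory keylist, a field that has no meaning in memory carries the level the key belongs to. A simplified standalone sketch of stashing transient metadata in an otherwise-unused field, with hypothetical types:

	/* sketch: reuse a field that is dead in memory as producer/consumer scratch */
	#include <stdio.h>

	struct btree_ptr_key {
		unsigned long mem_ptr;		/* unused on disk, scratch in memory */
		unsigned long long seq;
	};

	int main(void)
	{
		struct btree_ptr_key keys[2] = { { 0, 100 }, { 0, 101 } };

		/* producer: record which btree level each key updates */
		keys[0].mem_ptr = 1;
		keys[1].mem_ptr = 2;

		/* consumer: read the level back when journalling the update */
		for (int i = 0; i < 2; i++)
			printf("key seq=%llu level=%lu\n",
			       keys[i].seq, keys[i].mem_ptr);
		return 0;
	}
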
+
+static void btree_update_nodes_written(struct btree_update *as)
+{
+	struct bch_fs *c = as->c;
+	struct btree *b;
+	struct btree_trans *trans = bch2_trans_get(c);
+	u64 journal_seq = 0;
+	unsigned i;
+	int ret;
+
+	/*
+	 * If we're already in an error state, it might be because a btree node
+	 * was never written, and we might be trying to free that same btree
+	 * node here, but it won't have been marked as allocated and we'll see
+	 * spurious disk usage inconsistencies in the transactional part below
+	 * if we don't skip it:
+	 */
+	ret = bch2_journal_error(&c->journal);
+	if (ret)
+		goto err;
+
+	/*
+	 * Wait for any in flight writes to finish before we free the old nodes
+	 * on disk:
+	 */
+	for (i = 0; i < as->nr_old_nodes; i++) {
+		__le64 seq;
+
+		b = as->old_nodes[i];
+
+		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+		seq = b->data ? b->data->keys.seq : 0;
+		six_unlock_read(&b->c.lock);
+
+		if (seq == as->old_nodes_seq[i])
+			wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
+				       TASK_UNINTERRUPTIBLE);
+	}
+
+	/*
+	 * We did an update to a parent node where the pointers we added pointed
+	 * to child nodes that weren't written yet: now, the child nodes have
+	 * been written so we can write out the update to the interior node.
+	 */
+
+	/*
+	 * We can't call into journal reclaim here: we'd block on the journal
+	 * reclaim lock, but we may need to release the open buckets we have
+	 * pinned in order for other btree updates to make forward progress, and
+	 * journal reclaim does btree updates when flushing bkey_cached entries,
+	 * which may require allocations as well.
+	 */
+	ret = commit_do(trans, &as->disk_res, &journal_seq,
+			BCH_WATERMARK_reclaim|
+			BTREE_INSERT_NOFAIL|
+			BTREE_INSERT_NOCHECK_RW|
+			BTREE_INSERT_JOURNAL_RECLAIM,
+			btree_update_nodes_written_trans(trans, as));
+	bch2_trans_unlock(trans);
+
+	bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
+			     "%s(): error %s", __func__, bch2_err_str(ret));
+err:
+	if (as->b) {
+		struct btree_path *path;
+
+		b = as->b;
+		path = get_unlocked_mut_path(trans, as->btree_id, b->c.level, b->key.k.p);
+		/*
+		 * @b is the node we did the final insert into:
+		 *
+		 * On failure to get a journal reservation, we still have to
+		 * unblock the write and allow most of the write path to happen
+		 * so that shutdown works, but the i->journal_seq mechanism
+		 * won't work to prevent the btree write from being visible (we
+		 * didn't get a journal sequence number) - instead
+		 * __bch2_btree_node_write() doesn't do the actual write if
+		 * we're in journal error state:
+		 */
+
+		/*
+		 * Ensure transaction is unlocked before using
+		 * btree_node_lock_nopath() (the use of which is always suspect,
+		 * we need to work on removing this in the future)
+		 *
+		 * It should be, but get_unlocked_mut_path() -> bch2_path_get()
+		 * calls bch2_path_upgrade(), before we call path_make_mut(), so
+		 * we may rarely end up with a locked path besides the one we
+		 * have here:
+		 */
+		bch2_trans_unlock(trans);
+		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+		mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+		path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
+		path->l[b->c.level].b = b;
+
+		bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+
+		mutex_lock(&c->btree_interior_update_lock);
+
+		list_del(&as->write_blocked_list);
+		if (list_empty(&b->write_blocked))
+			clear_btree_node_write_blocked(b);
+
+		/*
+		 * Node might have been freed, recheck under
+		 * btree_interior_update_lock:
+		 */
+		if (as->b == b) {
+			BUG_ON(!b->c.level);
+			BUG_ON(!btree_node_dirty(b));
+
+			if (!ret) {
+				struct bset *last = btree_bset_last(b);
+
+				last->journal_seq = cpu_to_le64(
+					max(journal_seq,
+					    le64_to_cpu(last->journal_seq)));
+
+				bch2_btree_add_journal_pin(c, b, journal_seq);
+			} else {
+				/*
+				 * If we didn't get a journal sequence number we
+				 * can't write this btree node, because recovery
+				 * won't know to ignore this write:
+				 */
+				set_btree_node_never_write(b);
+			}
+		}
+
+		mutex_unlock(&c->btree_interior_update_lock);
+
+		mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+		six_unlock_write(&b->c.lock);
+
+		btree_node_write_if_need(c, b, SIX_LOCK_intent);
+		btree_node_unlock(trans, path, b->c.level);
+		bch2_path_put(trans, path, true);
+	}
+
+	bch2_journal_pin_drop(&c->journal, &as->journal);
+
+	mutex_lock(&c->btree_interior_update_lock);
+	for (i = 0; i < as->nr_new_nodes; i++) {
+		b = as->new_nodes[i];
+
+		BUG_ON(b->will_make_reachable != (unsigned long) as);
+		b->will_make_reachable = 0;
+		clear_btree_node_will_make_reachable(b);
+	}
+	mutex_unlock(&c->btree_interior_update_lock);
+
+	for (i = 0; i < as->nr_new_nodes; i++) {
+		b = as->new_nodes[i];
+
+		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+		btree_node_write_if_need(c, b, SIX_LOCK_read);
+		six_unlock_read(&b->c.lock);
+	}
+
+	for (i = 0; i < as->nr_open_buckets; i++)
+		bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);
+
+	bch2_btree_update_free(as, trans);
+	bch2_trans_put(trans);
+}
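
Note the sequence-number check before wait_on_bit_io() above: if the old node was already freed and its memory reused, its seq no longer matches the one recorded in the update, and there is no write left to wait for. A standalone sketch of that guard, using plain C stand-ins rather than kernel wait primitives:

	/* sketch: only wait on a node's write if its seq still matches */
	#include <stdbool.h>
	#include <stdio.h>

	struct node { unsigned long long seq; bool write_in_flight; };

	static void wait_for_write(struct node *n, unsigned long long old_seq)
	{
		if (n->seq != old_seq)
			return;		/* node was reused; old write is irrelevant */

		while (n->write_in_flight)
			;		/* stand-in for wait_on_bit_io() */
	}

	int main(void)
	{
		struct node n = { .seq = 7, .write_in_flight = false };

		wait_for_write(&n, 6);	/* seq mismatch: returns immediately */
		wait_for_write(&n, 7);	/* seq match: would wait for the write */
		printf("done\n");
		return 0;
	}
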
+
+static void btree_interior_update_work(struct work_struct *work)
+{
+	struct bch_fs *c =
+		container_of(work, struct bch_fs, btree_interior_update_work);
+	struct btree_update *as;
+
+	while (1) {
+		mutex_lock(&c->btree_interior_update_lock);
+		as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
+					      struct btree_update, unwritten_list);
+		if (as && !as->nodes_written)
+			as = NULL;
+		mutex_unlock(&c->btree_interior_update_lock);
+
+		if (!as)
+			break;
+
+		btree_update_nodes_written(as);
+	}
+}
+
+static void btree_update_set_nodes_written(struct closure *cl)
+{
+	struct btree_update *as = container_of(cl, struct btree_update, cl);
+	struct bch_fs *c = as->c;
+
+	mutex_lock(&c->btree_interior_update_lock);
+	as->nodes_written = true;
+	mutex_unlock(&c->btree_interior_update_lock);
+
+	queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
+}
+
+/*
+ * We're updating @b with pointers to nodes that haven't finished writing yet:
+ * block @b from being written until @as completes
+ */
+static void btree_update_updated_node(struct btree_update *as, struct btree *b)
+{
+	struct bch_fs *c = as->c;
+
+	mutex_lock(&c->btree_interior_update_lock);
+	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+	BUG_ON(!btree_node_dirty(b));
+	BUG_ON(!b->c.level);
+
+	as->mode = BTREE_INTERIOR_UPDATING_NODE;
+	as->b = b;
+
+	set_btree_node_write_blocked(b);
+	list_add(&as->write_blocked_list, &b->write_blocked);
+
+	mutex_unlock(&c->btree_interior_update_lock);
+}
+
+static void btree_update_reparent(struct btree_update *as,
+				  struct btree_update *child)
+{
+	struct bch_fs *c = as->c;
+
+	lockdep_assert_held(&c->btree_interior_update_lock);
+
+	child->b = NULL;
+	child->mode = BTREE_INTERIOR_UPDATING_AS;
+
+	bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);
+}
+
+static void btree_update_updated_root(struct btree_update *as, struct btree *b)
+{
+	struct bkey_i *insert = &b->key;
+	struct bch_fs *c = as->c;
+
+	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+
+	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
+	       ARRAY_SIZE(as->journal_entries));
+
+	as->journal_u64s +=
+		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
+				  BCH_JSET_ENTRY_btree_root,
+				  b->c.btree_id, b->c.level,
+				  insert, insert->k.u64s);
+
+	mutex_lock(&c->btree_interior_update_lock);
+	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+	as->mode = BTREE_INTERIOR_UPDATING_ROOT;
+	mutex_unlock(&c->btree_interior_update_lock);
+}
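
btree_update_set_nodes_written() runs in closure callback context, so it only flips a flag under the lock and defers the real completion work to a workqueue that is allowed to block. A minimal sketch of that handoff, using pthreads as a stand-in for the kernel workqueue:

	/* sketch: cheap completion callback, heavy work deferred to a worker */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool nodes_written;

	static void *worker(void *arg)
	{
		(void) arg;

		pthread_mutex_lock(&lock);
		bool ready = nodes_written;
		pthread_mutex_unlock(&lock);

		if (ready)
			printf("completing interior update\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_mutex_lock(&lock);
		nodes_written = true;			/* the cheap part, in the callback */
		pthread_mutex_unlock(&lock);

		pthread_create(&t, NULL, worker, NULL);	/* defer the rest */
		pthread_join(t, NULL);
		return 0;
	}
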
+
+/*
+ * bch2_btree_update_add_new_node:
+ *
+ * This causes @as to wait on @b to be written, before it gets to
+ * bch2_btree_update_nodes_written
+ *
+ * Additionally, it sets b->will_make_reachable to prevent any additional writes
+ * to @b from happening besides the first until @b is reachable on disk
+ *
+ * And it adds @b to the list of @as's new nodes, so that we can update sector
+ * counts in bch2_btree_update_nodes_written:
+ */
+static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
+{
+	struct bch_fs *c = as->c;
+
+	closure_get(&as->cl);
+
+	mutex_lock(&c->btree_interior_update_lock);
+	BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
+	BUG_ON(b->will_make_reachable);
+
+	as->new_nodes[as->nr_new_nodes++] = b;
+	b->will_make_reachable = 1UL|(unsigned long) as;
+	set_btree_node_will_make_reachable(b);
+
+	mutex_unlock(&c->btree_interior_update_lock);
+
+	btree_update_add_key(as, &as->new_keys, b);
+
+	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+		unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
+		unsigned sectors = round_up(bytes, block_bytes(c)) >> 9;
+
+		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+			cpu_to_le16(sectors);
+	}
+}
+
+/*
+ * If @b was a new node, drop it from the btree_update that was making it
+ * reachable:
+ */
+static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
+{
+	struct btree_update *as;
+	unsigned long v;
+	unsigned i;
+
+	mutex_lock(&c->btree_interior_update_lock);
+	/*
+	 * When b->will_make_reachable != 0, it owns a ref on as->cl that's
+	 * dropped when it gets written by bch2_btree_complete_write - the
+	 * xchg() is for synchronization with bch2_btree_complete_write:
+	 */
+	v = xchg(&b->will_make_reachable, 0);
+	clear_btree_node_will_make_reachable(b);
+	as = (struct btree_update *) (v & ~1UL);
+
+	if (!as) {
+		mutex_unlock(&c->btree_interior_update_lock);
+		return;
+	}
+
+	for (i = 0; i < as->nr_new_nodes; i++)
+		if (as->new_nodes[i] == b)
+			goto found;
+
+	BUG();
+found:
+	array_remove_item(as->new_nodes, as->nr_new_nodes, i);
+	mutex_unlock(&c->btree_interior_update_lock);
+
+	if (v & 1)
+		closure_put(&as->cl);
+}
+
+static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
+{
+	while (b->ob.nr)
+		as->open_buckets[as->nr_open_buckets++] =
+			b->ob.v[--b->ob.nr];
+}
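
b->will_make_reachable is a tagged pointer: the low bit records whether the node still holds a ref on the update's closure, the remaining bits are the btree_update pointer, and pointer alignment guarantees the low bit of a real pointer is free. A standalone sketch of the encoding (simplified, without the atomics):

	/* sketch: pointer plus one flag bit packed into a single word */
	#include <stdio.h>

	struct update { int refs; };

	int main(void)
	{
		struct update as = { .refs = 1 };

		/* store pointer + "holds a ref" flag in one word
		 * (assumes &as is at least 2-byte aligned, so bit 0 is free): */
		unsigned long v = 1UL | (unsigned long) &as;

		struct update *p = (struct update *) (v & ~1UL);
		int holds_ref = v & 1;

		if (holds_ref)
			p->refs--;	/* the closure_put() in drop_new_node */

		printf("refs=%d\n", p->refs);
		return 0;
	}
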
+
+/*
+ * @b is being split/rewritten: it may have pointers to not-yet-written btree
+ * nodes and thus outstanding btree_updates - redirect @b's
+ * btree_updates to point to this btree_update:
+ */
+static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
+						      struct btree *b)
+{
+	struct bch_fs *c = as->c;
+	struct btree_update *p, *n;
+	struct btree_write *w;
+
+	set_btree_node_dying(b);
+
+	if (btree_node_fake(b))
+		return;
+
+	mutex_lock(&c->btree_interior_update_lock);
+
+	/*
+	 * Does this node have any btree_update operations preventing
+	 * it from being written?
+	 *
+	 * If so, redirect them to point to this btree_update: we can
+	 * write out our new nodes, but we won't make them visible until those
+	 * operations complete
+	 */
+	list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
+		list_del_init(&p->write_blocked_list);
+		btree_update_reparent(as, p);
+
+		/*
+		 * for flush_held_btree_writes() waiting on updates to flush or
+		 * nodes to be writeable:
+		 */
+		closure_wake_up(&c->btree_interior_update_wait);
+	}
+
+	clear_btree_node_dirty_acct(c, b);
+	clear_btree_node_need_write(b);
+	clear_btree_node_write_blocked(b);
+
+	/*
+	 * Does this node have unwritten data that has a pin on the journal?
+	 *
+	 * If so, transfer that pin to the btree_update operation -
+	 * note that if we're freeing multiple nodes, we only need to keep the
+	 * oldest pin of any of the nodes we're freeing. We'll release the pin
+	 * when the new nodes are persistent and reachable on disk:
+	 */
+	w = btree_current_write(b);
+	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
+	bch2_journal_pin_drop(&c->journal, &w->journal);
+
+	w = btree_prev_write(b);
+	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
+	bch2_journal_pin_drop(&c->journal, &w->journal);
+
+	mutex_unlock(&c->btree_interior_update_lock);
+
+	/*
+	 * Is this a node that isn't reachable on disk yet?
+	 *
+	 * Nodes that aren't reachable yet have writes blocked until they're
+	 * reachable - now that we've cancelled any pending writes and moved
+	 * things waiting on that write to wait on this update, we can drop this
+	 * node from the list of nodes that the other update is making
+	 * reachable, prior to freeing it:
+	 */
+	btree_update_drop_new_node(c, b);
+
+	btree_update_add_key(as, &as->old_keys, b);
+
+	as->old_nodes[as->nr_old_nodes] = b;
+	as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;
+	as->nr_old_nodes++;
+}
+
+static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans)
+{
+	struct bch_fs *c = as->c;
+	u64 start_time = as->start_time;
+
+	BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
+
+	if (as->took_gc_lock)
+		up_read(&as->c->gc_lock);
+	as->took_gc_lock = false;
+
+	bch2_btree_reserve_put(as, trans);
+
+	continue_at(&as->cl, btree_update_set_nodes_written,
+		    as->c->btree_interior_update_worker);
+
+	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground],
+			       start_time);
+}
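
When several nodes are freed by one update, only the oldest journal pin needs to survive: conceptually, each pin transfer keeps the older of the two sequence numbers, so the update's pin converges on the minimum across all freed nodes. A standalone sketch reducing the idea to plain sequence numbers (values are made up):

	/* sketch: transferring pins keeps only the oldest (smallest) journal seq */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long update_pin = ~0ULL;	/* no pin yet */
		unsigned long long node_pins[] = { 120, 117, 123 };

		for (int i = 0; i < 3; i++)
			if (node_pins[i] < update_pin)
				update_pin = node_pins[i];

		printf("update now pins seq %llu\n", update_pin);
		return 0;
	}
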
+
+static struct btree_update *
+bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
+			unsigned level, bool split, unsigned flags)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_update *as;
+	u64 start_time = local_clock();
+	int disk_res_flags = (flags & BTREE_INSERT_NOFAIL)
+		? BCH_DISK_RESERVATION_NOFAIL : 0;
+	unsigned nr_nodes[2] = { 0, 0 };
+	unsigned update_level = level;
+	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+	int ret = 0;
+	u32 restart_count = trans->restart_count;
+
+	BUG_ON(!path->should_be_locked);
+
+	if (watermark == BCH_WATERMARK_copygc)
+		watermark = BCH_WATERMARK_btree_copygc;
+	if (watermark < BCH_WATERMARK_btree)
+		watermark = BCH_WATERMARK_btree;
+
+	flags &= ~BCH_WATERMARK_MASK;
+	flags |= watermark;
+
+	while (1) {
+		nr_nodes[!!update_level] += 1 + split;
+		update_level++;
+
+		ret = bch2_btree_path_upgrade(trans, path, update_level + 1);
+		if (ret)
+			return ERR_PTR(ret);
+
+		if (!btree_path_node(path, update_level)) {
+			/* Allocating new root? */
+			nr_nodes[1] += split;
+			update_level = BTREE_MAX_DEPTH;
+			break;
+		}
+
+		if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
+					BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
+			break;
+
+		split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
+	}
+
+	if (flags & BTREE_INSERT_GC_LOCK_HELD)
+		lockdep_assert_held(&c->gc_lock);
+	else if (!down_read_trylock(&c->gc_lock)) {
+		ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
+		if (ret) {
+			up_read(&c->gc_lock);
+			return ERR_PTR(ret);
+		}
+	}
+
+	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
+	memset(as, 0, sizeof(*as));
+	closure_init(&as->cl, NULL);
+	as->c = c;
+	as->start_time = start_time;
+	as->mode = BTREE_INTERIOR_NO_UPDATE;
+	as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD);
+	as->btree_id = path->btree_id;
+	as->update_level = update_level;
+	INIT_LIST_HEAD(&as->list);
+	INIT_LIST_HEAD(&as->unwritten_list);
+	INIT_LIST_HEAD(&as->write_blocked_list);
+	bch2_keylist_init(&as->old_keys, as->_old_keys);
+	bch2_keylist_init(&as->new_keys, as->_new_keys);
+	bch2_keylist_init(&as->parent_keys, as->inline_keys);
+
+	mutex_lock(&c->btree_interior_update_lock);
+	list_add_tail(&as->list, &c->btree_interior_update_list);
+	mutex_unlock(&c->btree_interior_update_lock);
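
The reservation loop at the top of bch2_btree_update_start() sizes the node reserve by walking up from the level being updated: each level needs one replacement node, plus one more if it may split, and a split that reaches the top allocates a new root. A simplified standalone sketch of that accounting; unlike the real loop, it assumes only the bottom level splits:

	/* sketch: counting nodes to preallocate for an update at level 0 */
	#include <stdio.h>

	int main(void)
	{
		unsigned depth = 3;			/* levels 0..2 exist */
		int split = 1;				/* the leaf may split */
		unsigned nr_nodes[2] = { 0, 0 };	/* [leaf, interior] */

		for (unsigned l = 0; l < depth; l++) {
			nr_nodes[!!l] += 1 + split;
			split = 0;	/* assume parents absorb the new keys */
		}

		printf("reserve: %u leaf, %u interior\n", nr_nodes[0], nr_nodes[1]);
		return 0;
	}
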