| author    | Kent Overstreet <kent.overstreet@gmail.com> | 2022-07-17 23:06:38 -0400 |
|-----------|---------------------------------------------|---------------------------|
| committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:09:37 -0400 |
| commit    | 549d173c1bd9b58c2ad41217522462e012a6545f (patch) | |
| tree      | 9bf146d761ae1f81ba18ec45a1bda63cb47549eb | |
| parent    | 0990efaeeab14de1e3e3bf2791808afebadd1cc4 (diff) | |
bcachefs: EINTR -> BCH_ERR_transaction_restart
Now that we have error codes, with subtypes, we can switch to our own
error code for transaction restarts - and even better, a distinct error
code for each transaction restart reason: clearer code and better
debugging.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
28 files changed, 314 insertions(+), 270 deletions(-)
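The patch works because bcachefs's private error codes form a hierarchy: each `BCH_ERR_transaction_restart_*` subtype names `BCH_ERR_transaction_restart` as its parent class, and `bch2_err_matches()` walks that relation, so a single check catches every restart reason while `ret` still records exactly which one fired. The following is a minimal standalone model of that idea; the `model_*` names and parent table are invented for illustration and are not the actual `fs/bcachefs/errcode.h` machinery.

```c
/*
 * Minimal standalone model of the mechanism this patch introduces: private
 * error codes that carry a "parent" class, so an err_matches() helper can
 * test for "any transaction restart" while the code still records *which*
 * restart reason fired.  Illustrative sketch only, not the real errcode.h.
 */
#include <stdio.h>

enum model_errcode {
	MODEL_ERR_START = 2048,
	MODEL_ERR_transaction_restart,			/* the class */
	MODEL_ERR_transaction_restart_relock,		/* a subtype */
	MODEL_ERR_transaction_restart_too_many_iters,	/* another subtype */
	MODEL_ERR_MAX,
};

/* parent[err - MODEL_ERR_START] = class the code belongs to (0 = none) */
static const unsigned model_err_parent[] = {
	[MODEL_ERR_transaction_restart - MODEL_ERR_START] = 0,
	[MODEL_ERR_transaction_restart_relock - MODEL_ERR_START] =
		MODEL_ERR_transaction_restart,
	[MODEL_ERR_transaction_restart_too_many_iters - MODEL_ERR_START] =
		MODEL_ERR_transaction_restart,
};

/* like bch2_err_matches(): true if -err is @class or a subtype of it */
static int model_err_matches(int err, unsigned class)
{
	unsigned e = -err;

	while (e >= MODEL_ERR_START && e < MODEL_ERR_MAX) {
		if (e == class)
			return 1;
		e = model_err_parent[e - MODEL_ERR_START];
	}
	return 0;
}

int main(void)
{
	int ret = -MODEL_ERR_transaction_restart_relock;

	/* one check covers every restart subtype ... */
	printf("restart? %d\n",
	       model_err_matches(ret, MODEL_ERR_transaction_restart));
	/* ... while ret still names the specific reason for debugging */
	printf("ret = %d\n", ret);
	return 0;
}
```

The real errcode.h generates both the enum and the parent links from a single x-macro list, so a new restart reason is a one-line addition there.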
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 00cd40a8d7fa..7edebeed779e 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -234,7 +234,7 @@ retry:
 			&X_SEARCH(acl_to_xattr_type(type), "", 0),
 			0);
 	if (ret) {
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 		if (ret != -ENOENT)
 			acl = ERR_PTR(ret);
@@ -334,7 +334,7 @@ retry:
 btree_err:
 	bch2_trans_iter_exit(&trans, &inode_iter);
 
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	if (unlikely(ret))
 		goto err;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index eb44a8bc04fe..15c3c9a2da7b 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -995,7 +995,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 				GFP_KERNEL);
 		*discard_pos_done = iter.pos;
 
-		ret = bch2_trans_relock(trans) ? 0 : -EINTR;
+		ret = bch2_trans_relock(trans);
 		if (ret)
 			goto out;
 	}
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index a9f893361c73..99fbf1d2dee5 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -470,8 +470,9 @@ again:
 		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
 		     alloc_cursor < k.k->p.offset;
 		     alloc_cursor++) {
-			if (btree_trans_too_many_iters(trans)) {
-				ob = ERR_PTR(-EINTR);
+			ret = btree_trans_too_many_iters(trans);
+			if (ret) {
+				ob = ERR_PTR(ret);
 				break;
 			}
@@ -488,7 +489,8 @@ again:
 				break;
 			}
 		}
-		if (ob)
+
+		if (ob || ret)
 			break;
 	}
 	bch2_trans_iter_exit(trans, &iter);
@@ -738,7 +740,7 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 
 		ret = PTR_ERR_OR_ZERO(ob);
 		if (ret) {
-			if (ret == -EINTR || cl)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
 				break;
 			continue;
 		}
@@ -925,7 +927,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 					      target, erasure_code,
 					      nr_replicas, nr_effective,
 					      have_cache, flags, _cl);
-		if (ret == -EINTR ||
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
 		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
 			return ret;
@@ -949,7 +951,7 @@ retry_blocking:
 			nr_replicas, nr_effective, have_cache,
 			reserve, flags, cl);
 	if (ret &&
-	    ret != -EINTR &&
+	    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 	    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
 	    !cl && _cl) {
 		cl = _cl;
@@ -1191,7 +1193,8 @@ retry:
 					      nr_replicas, &nr_effective,
 					      &have_cache, reserve,
 					      ob_flags, NULL);
-		if (!ret || ret == -EINTR)
+		if (!ret ||
+		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto alloc_done;
 
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 1f80f08a69b2..4032c27fcc9c 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -7,6 +7,7 @@
 #include "btree_iter.h"
 #include "btree_locking.h"
 #include "debug.h"
+#include "errcode.h"
 #include "error.h"
 
 #include "trace.h"
@@ -692,8 +693,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
 		trace_trans_restart_relock_parent_for_fill(trans->fn,
 					_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
 
 	b = bch2_btree_node_mem_alloc(c, level != 0);
@@ -702,8 +702,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 		trans->memory_allocation_failure = true;
 		trace_trans_restart_memory_allocation_failure(trans->fn,
 					_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 	}
 
 	if (IS_ERR(b))
@@ -740,18 +740,19 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (!sync)
 		return NULL;
 
-	if (trans &&
-	    (!bch2_trans_relock(trans) ||
-	     !bch2_btree_path_relock_intent(trans, path))) {
-		BUG_ON(!trans->restarted);
-		return ERR_PTR(-EINTR);
+	if (trans) {
+		int ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			BUG_ON(!trans->restarted);
+			return ERR_PTR(ret);
+		}
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
 		trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
 					   btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
 
 	return b;
@@ -762,7 +763,9 @@ static int lock_node_check_fn(struct six_lock *lock, void *p)
 	struct btree *b = container_of(lock, struct btree, c.lock);
 	const struct bkey_i *k = p;
 
-	return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
+	if (b->hash_val != btree_ptr_hash_val(k))
+		return BCH_ERR_lock_fail_node_reused;
+	return 0;
 }
 
 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
@@ -821,6 +824,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
+	int ret;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -885,11 +889,14 @@ lock_node:
 		if (btree_node_read_locked(path, level + 1))
 			btree_node_unlock(trans, path, level + 1);
 
-		if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
-				     lock_node_check_fn, (void *) k, trace_ip)) {
-			if (!trans->restarted)
+		ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type,
+				      lock_node_check_fn, (void *) k, trace_ip);
+		if (unlikely(ret)) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
 				goto retry;
-			return ERR_PTR(-EINTR);
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				return ERR_PTR(ret);
+			BUG();
 		}
 
 		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
@@ -903,8 +910,7 @@ lock_node:
 					 trace_ip,
 					 path->btree_id,
 					 &path->pos);
-			btree_trans_restart(trans);
-			return ERR_PTR(-EINTR);
+			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
 	}
 
@@ -920,11 +926,13 @@ lock_node:
 	 * should_be_locked is not set on this path yet, so we need to
 	 * relock it specifically:
 	 */
-	if (trans &&
-	    (!bch2_trans_relock(trans) ||
-	     !bch2_btree_path_relock_intent(trans, path))) {
-		BUG_ON(!trans->restarted);
-		return ERR_PTR(-EINTR);
+	if (trans) {
+		int ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			BUG_ON(!trans->restarted);
+			return ERR_PTR(ret);
+		}
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq))
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 45ecd196bceb..db247c96298f 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -34,7 +34,7 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans)
 	if (need_resched() || race_fault()) {
 		bch2_trans_unlock(trans);
 		schedule();
-		return bch2_trans_relock(trans) ? 0 : -EINTR;
+		return bch2_trans_relock(trans);
 	} else {
 		return 0;
 	}
@@ -285,13 +285,13 @@ static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
 }
 
 /* Slowpath: */
-bool __bch2_btree_node_lock(struct btree_trans *trans,
-			    struct btree_path *path,
-			    struct btree *b,
-			    struct bpos pos, unsigned level,
-			    enum six_lock_type type,
-			    six_lock_should_sleep_fn should_sleep_fn, void *p,
-			    unsigned long ip)
+int __bch2_btree_node_lock(struct btree_trans *trans,
+			   struct btree_path *path,
+			   struct btree *b,
+			   struct bpos pos, unsigned level,
+			   enum six_lock_type type,
+			   six_lock_should_sleep_fn should_sleep_fn, void *p,
+			   unsigned long ip)
 {
 	struct btree_path *linked;
 	unsigned reason;
@@ -369,8 +369,7 @@ deadlock:
 			path->btree_id,
 			path->cached,
 			&pos);
-	btree_trans_restart(trans);
-	return false;
+	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
 
 /* Btree iterator locking: */
@@ -408,8 +407,8 @@ static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
 /*
  * Only for btree_cache.c - only relocks intent locks
  */
-bool bch2_btree_path_relock_intent(struct btree_trans *trans,
-				   struct btree_path *path)
+int bch2_btree_path_relock_intent(struct btree_trans *trans,
+				  struct btree_path *path)
 {
 	unsigned l;
 
@@ -421,16 +420,15 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans,
 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 			trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
 						path->btree_id, &path->pos);
-			btree_trans_restart(trans);
-			return false;
+			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 		}
 	}
 
-	return true;
+	return 0;
 }
 
 noinline __flatten
-static bool __bch2_btree_path_relock(struct btree_trans *trans,
+static int __bch2_btree_path_relock(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
 	bool ret = btree_path_get_locks(trans, path, false);
@@ -438,16 +436,17 @@ static bool __bch2_btree_path_relock(struct btree_trans *trans,
 
 	if (!ret) {
 		trace_trans_restart_relock_path(trans->fn, trace_ip,
 						path->btree_id, &path->pos);
-		btree_trans_restart(trans);
+		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
 	}
-	return ret;
+
+	return 0;
 }
 
-static inline bool bch2_btree_path_relock(struct btree_trans *trans,
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
 	return btree_node_locked(path, path->level)
-		? true
+		? 0
 		: __bch2_btree_path_relock(trans, path, trace_ip);
 }
@@ -532,22 +531,22 @@ void bch2_trans_downgrade(struct btree_trans *trans)
 
 /* Btree transaction locking: */
 
-bool bch2_trans_relock(struct btree_trans *trans)
+int bch2_trans_relock(struct btree_trans *trans)
 {
 	struct btree_path *path;
 
 	if (unlikely(trans->restarted))
-		return false;
+		return -BCH_ERR_transaction_restart_relock;
 
 	trans_for_each_path(trans, path)
 		if (path->should_be_locked &&
-		    !bch2_btree_path_relock(trans, path, _RET_IP_)) {
+		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
 			trace_trans_restart_relock(trans->fn, _RET_IP_,
 					path->btree_id, &path->pos);
 			BUG_ON(!trans->restarted);
-			return false;
+			return -BCH_ERR_transaction_restart_relock;
 		}
-	return true;
+	return 0;
 }
 
 void bch2_trans_unlock(struct btree_trans *trans)
@@ -1187,7 +1186,9 @@ static int lock_root_check_fn(struct six_lock *lock, void *p)
 	struct btree *b = container_of(lock, struct btree, c.lock);
 	struct btree **rootp = p;
 
-	return b == *rootp ? 0 : -1;
+	if (b != *rootp)
+		return BCH_ERR_lock_fail_root_changed;
+	return 0;
 }
 
 static inline int btree_path_lock_root(struct btree_trans *trans,
@@ -1199,6 +1200,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
 	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
 	enum six_lock_type lock_type;
 	unsigned i;
+	int ret;
 
 	EBUG_ON(path->nodes_locked);
@@ -1220,13 +1222,16 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
 		}
 
 		lock_type = __btree_lock_want(path, path->level);
-		if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
-					      path->level, lock_type,
-					      lock_root_check_fn, rootp,
-					      trace_ip))) {
-			if (trans->restarted)
-				return -EINTR;
-			continue;
+		ret = btree_node_lock(trans, path, b, SPOS_MAX,
+				      path->level, lock_type,
+				      lock_root_check_fn, rootp,
+				      trace_ip);
+		if (unlikely(ret)) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
+				continue;
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				return ret;
+			BUG();
 		}
 
 		if (likely(b == READ_ONCE(*rootp) &&
@@ -1431,12 +1436,12 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	int i, ret = 0;
 
 	if (trans->in_traverse_all)
-		return -EINTR;
+		return -BCH_ERR_transaction_restart_in_traverse_all;
 
 	trans->in_traverse_all = true;
 retry_all:
 	prev = NULL;
-	trans->restarted = false;
+	trans->restarted = 0;
 
 	trans_for_each_path(trans, path)
 		path->should_be_locked = false;
@@ -1480,7 +1485,8 @@ retry_all:
 		 */
 		if (path->uptodate) {
 			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
-			if (ret == -EINTR || ret == -ENOMEM)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+			    ret == -ENOMEM)
 				goto retry_all;
 			if (ret)
 				goto err;
@@ -1587,19 +1593,17 @@ static int btree_path_traverse_one(struct btree_trans *trans,
 				   unsigned long trace_ip)
 {
 	unsigned depth_want = path->level;
-	int ret = 0;
+	int ret = trans->restarted;
 
-	if (unlikely(trans->restarted)) {
-		ret = -EINTR;
+	if (unlikely(ret))
 		goto out;
-	}
 
 	/*
 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
 	 * and re-traverse the path without a transaction restart:
 	 */
 	if (path->should_be_locked) {
-		ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
+		ret = bch2_btree_path_relock(trans, path, trace_ip);
 		goto out;
 	}
@@ -1648,7 +1652,7 @@ static int btree_path_traverse_one(struct btree_trans *trans,
 
 	path->uptodate = BTREE_ITER_UPTODATE;
 out:
-	BUG_ON((ret == -EINTR) != !!trans->restarted);
+	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
 	bch2_btree_path_verify(trans, path);
 	return ret;
 }
@@ -2135,8 +2139,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 		trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
 					   path->btree_id, &path->pos);
-		btree_trans_restart(trans);
-		ret = -EINTR;
+		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 		goto err;
 	}
@@ -2517,8 +2520,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 	BUG_ON(!iter->path->nodes_locked);
 out:
 	if (iter->update_path) {
-		if (unlikely(!bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_))) {
-			k = bkey_s_c_err(-EINTR);
+		ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+		if (unlikely(ret)) {
+			k = bkey_s_c_err(ret);
 		} else {
 			BUG_ON(!(iter->update_path->nodes_locked & 1));
 			iter->update_path->should_be_locked = true;
@@ -3169,8 +3173,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 		if (old_bytes) {
 			trace_trans_restart_mem_realloced(trans->fn, _RET_IP_,
 							  new_bytes);
-			btree_trans_restart(trans);
-			return ERR_PTR(-EINTR);
+			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
 		}
 	}
@@ -3184,9 +3187,9 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
  * bch2_trans_begin() - reset a transaction after a interrupted attempt
  * @trans: transaction to reset
  *
- * While iterating over nodes or updating nodes a attempt to lock a btree
- * node may return EINTR when the trylock fails. When this occurs
- * bch2_trans_begin() should be called and the transaction retried.
+ * While iterating over nodes or updating nodes a attempt to lock a btree node
+ * may return BCH_ERR_transaction_restart when the trylock fails. When this
+ * occurs bch2_trans_begin() should be called and the transaction retried.
  */
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 1952a7683610..79339a6abcd7 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -197,27 +197,36 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
 			      struct btree *, struct btree_node_iter *,
 			      struct bkey_packed *, unsigned, unsigned);
 
-bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
+int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
 
 void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
 
-bool bch2_trans_relock(struct btree_trans *);
+int bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
 
-static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
+static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
 {
-	return restart_count != trans->restart_count ? -EINTR : 0;
+	return restart_count != trans->restart_count;
 }
 
 void bch2_trans_verify_not_restarted(struct btree_trans *, u32);
 
 __always_inline
-static inline int btree_trans_restart(struct btree_trans *trans)
+static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
 {
-	trans->restarted = true;
+	BUG_ON(err <= 0);
+	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+
+	trans->restarted = err;
 	trans->restart_count++;
-	bch2_trans_unlock(trans);
-	return -EINTR;
+	return -err;
+}
+
+__always_inline
+static inline int btree_trans_restart(struct btree_trans *trans, int err)
+{
+	btree_trans_restart_nounlock(trans, err);
+	return -err;
 }
 
 bool bch2_btree_node_upgrade(struct btree_trans *,
@@ -338,7 +347,7 @@ __btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter
 	struct btree *b;
 
 	while (b = bch2_btree_iter_peek_node(iter),
-	       PTR_ERR_OR_ZERO(b) == -EINTR)
+	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
 		bch2_trans_begin(trans);
 
 	return b;
@@ -387,7 +396,7 @@ static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
 	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2) {
 		trace_trans_restart_too_many_iters(trans->fn, _THIS_IP_);
-		return btree_trans_restart(trans);
+		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
 	}
 
 	return 0;
@@ -401,7 +410,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 
 	while (btree_trans_too_many_iters(trans) ||
 	       (k = bch2_btree_iter_peek_type(iter, flags),
-		bkey_err(k) == -EINTR))
+		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
 		bch2_trans_begin(trans);
 
 	return k;
@@ -414,7 +423,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	do {								\
 		bch2_trans_begin(_trans);				\
 		_ret = (_do);						\
-	} while (_ret == -EINTR);					\
+	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
 									\
 	_ret;								\
 })
@@ -425,7 +434,8 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
- * - We return -EINTR if we succeeded after a transaction restart
+ * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
+ *   transaction restart
 */
 #define nested_lockrestart_do(_trans, _do)				\
 ({									\
@@ -434,13 +444,16 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 									\
 	_restart_count = _orig_restart_count = (_trans)->restart_count;\
 									\
-	while ((_ret = (_do)) == -EINTR)				\
+	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
 		_restart_count = bch2_trans_begin(_trans);		\
 									\
 	if (!_ret)							\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 									\
-	_ret ?: trans_was_restarted(_trans, _orig_restart_count);	\
+	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
+		_ret = -BCH_ERR_transaction_restart_nested;		\
+									\
+	_ret;								\
 })
 
 #define for_each_btree_key2(_trans, _iter, _btree_id,			\
@@ -451,7 +464,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
 									\
-	do {								\
+	while (1) {							\
 		bch2_trans_begin(_trans);				\
 		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
 		if (!(_k).k) {						\
@@ -460,9 +473,12 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 		}							\
 									\
 		_ret = bkey_err(_k) ?: (_do);				\
-		if (!_ret)						\
-			bch2_btree_iter_advance(&(_iter));		\
-	} while (_ret == 0 || _ret == -EINTR);				\
+		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+			continue;					\
+		if (_ret)						\
+			break;						\
+		bch2_btree_iter_advance(&(_iter));			\
+	}								\
 									\
 	bch2_trans_iter_exit((_trans), &(_iter));			\
 	_ret;								\
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index e5a29240bbcc..549abe607b53 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -5,6 +5,7 @@
 #include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update.h"
+#include "errcode.h"
 #include "error.h"
 #include "journal.h"
 #include "journal_reclaim.h"
@@ -292,7 +293,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
 		trace_trans_restart_relock_key_cache_fill(trans->fn,
 				_THIS_IP_, ck_path->btree_id, &ck_path->pos);
-		ret = btree_trans_restart(trans);
+		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		goto err;
 	}
@@ -347,8 +348,10 @@ static int bkey_cached_check_fn(struct six_lock *lock, void *p)
 	struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
 	const struct btree_path *path = p;
 
-	return ck->key.btree_id == path->btree_id &&
-	       !bpos_cmp(ck->key.pos, path->pos) ? 0 : -1;
+	if (ck->key.btree_id != path->btree_id &&
+	    bpos_cmp(ck->key.pos, path->pos))
+		return BCH_ERR_lock_fail_node_reused;
+	return 0;
 }
 
 __flatten
@@ -387,14 +390,15 @@ retry:
 	} else {
 		enum six_lock_type lock_want = __btree_lock_want(path, 0);
 
-		if (!btree_node_lock(trans, path, (void *) ck, path->pos, 0,
-				     lock_want,
-				     bkey_cached_check_fn, path, _THIS_IP_)) {
-			if (!trans->restarted)
+		ret = btree_node_lock(trans, path, (void *) ck, path->pos, 0,
+				      lock_want,
+				      bkey_cached_check_fn, path, _THIS_IP_);
+		if (ret) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
 				goto retry;
-
-			ret = -EINTR;
-			goto err;
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				goto err;
+			BUG();
 		}
 
 		if (ck->key.btree_id != path->btree_id ||
@@ -413,7 +417,7 @@ fill:
 		if (!path->locks_want &&
 		    !__bch2_btree_path_upgrade(trans, path, 1)) {
 			trace_transaction_restart_ip(trans->fn, _THIS_IP_);
-			ret = btree_trans_restart(trans);
+			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 			goto err;
 		}
@@ -430,7 +434,7 @@ fill:
 
 	return ret;
 err:
-	if (ret != -EINTR) {
+	if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 		btree_node_unlock(trans, path, 0);
 		path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
 	}
@@ -497,13 +501,14 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 				  ? JOURNAL_WATERMARK_reserved
 				  : 0)|
 				commit_flags);
-	if (ret) {
-		bch2_fs_fatal_err_on(ret != -EINTR &&
-				     ret != -EAGAIN &&
-				     !bch2_journal_error(j), c,
-				     "error flushing key cache: %i", ret);
+
+	bch2_fs_fatal_err_on(ret &&
+			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
+			     !bch2_journal_error(j), c,
+			     "error flushing key cache: %s", bch2_err_str(ret));
+
+	if (ret)
 		goto out;
-	}
 
 	bch2_journal_pin_drop(j, &ck->journal);
 	bch2_journal_preres_put(j, &ck->res);
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index b8708466c4e3..33a69e27c39e 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -152,7 +152,7 @@ static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
 	}
 }
 
-static inline bool btree_node_lock_type(struct btree_trans *trans,
+static inline int btree_node_lock_type(struct btree_trans *trans,
 				       struct btree_path *path,
 				       struct btree *b,
 				       struct bpos pos, unsigned level,
@@ -161,10 +161,10 @@ static inline bool btree_node_lock_type(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	u64 start_time;
-	bool ret;
+	int ret;
 
 	if (six_trylock_type(&b->c.lock, type))
-		return true;
+		return 0;
 
 	start_time = local_clock();
@@ -174,13 +174,14 @@ static inline bool btree_node_lock_type(struct btree_trans *trans,
 	trans->locking_level = level;
 	trans->locking_lock_type = type;
 	trans->locking = b;
-	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
+	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
 	trans->locking = NULL;
 
 	if (ret)
-		bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+		return ret;
 
-	return ret;
+	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+	return 0;
 }
 
 /*
@@ -203,33 +204,34 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
 	return false;
 }
 
-bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
-			    struct btree *, struct bpos, unsigned,
-			    enum six_lock_type,
-			    six_lock_should_sleep_fn, void *,
-			    unsigned long);
+int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
+			   struct btree *, struct bpos, unsigned,
+			   enum six_lock_type,
+			   six_lock_should_sleep_fn, void *,
+			   unsigned long);
 
-static inline bool btree_node_lock(struct btree_trans *trans,
+static inline int btree_node_lock(struct btree_trans *trans,
 			struct btree_path *path,
 			struct btree *b, struct bpos pos, unsigned level,
 			enum six_lock_type type,
 			six_lock_should_sleep_fn should_sleep_fn, void *p,
 			unsigned long ip)
 {
+	int ret = 0;
+
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
 
 	if (likely(six_trylock_type(&b->c.lock, type)) ||
-	    btree_node_lock_increment(trans, b, level, type) ||
-	    __bch2_btree_node_lock(trans, path, b, pos, level, type,
-				   should_sleep_fn, p, ip)) {
+	    btree_node_lock_increment(trans, b, level, type) ||
+	    !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
+					   should_sleep_fn, p, ip))) {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 		path->l[b->c.level].lock_taken_time = ktime_get_ns();
 #endif
-		return true;
-	} else {
-		return false;
 	}
+
+	return ret;
 }
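A detail worth noting in the hunks above: helpers that used to return `bool` now return `0` on success or a negative error code, which lets call sites chain them with GNU C's two-operand conditional, as in `int ret = bch2_trans_relock(trans) ?: bch2_btree_path_relock_intent(trans, path);`. A small self-contained demonstration of that extension follows; the `step_*` functions are made up for the example.

```c
/*
 * GNU C's two-operand ?: evaluates its left side once and yields it when
 * nonzero, falling through to the right side only on zero.  The patch uses
 * this to chain lock helpers that return 0 on success and a negative
 * restart code on failure.  Compile with gcc or clang (GNU extension).
 */
#include <stdio.h>

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -42; }	/* fails with an error code */

int main(void)
{
	/* equivalent to: int ret = step_a(); if (!ret) ret = step_b(); */
	int ret = step_a() ?: step_b();

	printf("ret = %d\n", ret);	/* -42: first failure wins */
	return 0;
}
```

Because each helper returns zero on success, such a chain stops at and returns the first failure, which keeps the specific restart reason intact all the way up to the retry loop.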