| author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-09-12 18:41:22 -0400 |
|---|---|---|
| committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:10:13 -0400 |
| commit | 96dea3d599dbc31f59eb786af2ac5079122beb88 (patch) | |
| tree | 2b3bf4a0641f1529bffbda9a02a9c66974e8d6e5 | |
| parent | b5e85d4d0ccf819df1ee73db41bf388ddd6e1830 (diff) | |
bcachefs: Fix W=12 build errors
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
51 files changed, 459 insertions(+), 451 deletions(-)
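Most of the changes below follow a few recurring patterns for quieting a high-warning (`W=1`/`W=2`) build: file-scope and macro-generated constants gain `__maybe_unused` so unused-constant warnings stop firing in translation units that never reference them, locals that shadowed an outer variable are renamed (`k` → `u`, `ca` → `ca2`, and so on), and kernel-doc comments gain the `@param`/`Returns:` lines the documentation checks expect. Below is a minimal stand-alone sketch of the `__maybe_unused` pattern; the identifiers are illustrative, not taken from the patch:

```c
/* Illustrative only -- not part of the patch. In the kernel, __maybe_unused
 * expands to __attribute__((__unused__)); define it here so the sketch
 * compiles outside the kernel tree. */
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif

/* Without the annotation, an unreferenced file-scope const like this can
 * trigger -Wunused-const-variable in any file that includes the header but
 * never uses it; __maybe_unused marks the non-use as intentional. */
static const __maybe_unused unsigned example_bucket_bytes = 512;

int main(void)
{
	return 0;	/* example_bucket_bytes is deliberately left unused */
}
```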
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ee21aeece39e..4eab7e59ae93 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1200,15 +1200,15 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 	}
 
 	if (need_update) {
-		struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));
+		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
 
-		ret = PTR_ERR_OR_ZERO(k);
+		ret = PTR_ERR_OR_ZERO(u);
 		if (ret)
 			goto err;
 
-		memcpy(k, &g, sizeof(g));
+		memcpy(u, &g, sizeof(g));
 
-		ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
+		ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
 		if (ret)
 			goto err;
 	}
@@ -1354,15 +1354,14 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 	}
 
 	if (need_update) {
-		struct bkey_i *k;
+		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
 
-		k = bch2_trans_kmalloc(trans, sizeof(g));
-		ret = PTR_ERR_OR_ZERO(k);
+		ret = PTR_ERR_OR_ZERO(u);
 		if (ret)
 			goto out;
 
-		memcpy(k, &g, sizeof(g));
-		ret = bch2_trans_update(trans, iter, k, 0);
+		memcpy(u, &g, sizeof(g));
+		ret = bch2_trans_update(trans, iter, u, 0);
 	}
 out:
 fsck_err:
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 8e1888a89011..e73b6c82870a 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -502,9 +502,14 @@ again:
 }
 
 /**
- * bch_bucket_alloc - allocate a single bucket from a specific device
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans:	transaction object
+ * @ca:		device to allocate from
+ * @watermark:	how important is this allocation?
+ * @cl:		if not NULL, closure to be used to wait if buckets not available
+ * @usage:	for secondarily also returning the current device usage
  *
- * Returns index of bucket on success, 0 on failure
+ * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 				      struct bch_dev *ca,
@@ -775,7 +780,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	struct dev_alloc_list devs_sorted;
 	struct ec_stripe_head *h;
 	struct open_bucket *ob;
-	struct bch_dev *ca;
 	unsigned i, ec_idx;
 	int ret = 0;
 
@@ -805,8 +809,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	}
 	goto out_put_head;
 got_bucket:
-	ca = bch_dev_bkey_exists(c, ob->dev);
-
 	ob->ec_idx	= ec_idx;
 	ob->ec		= h->s;
 	ec_stripe_new_get(h->s, STRIPE_REF_io);
@@ -1032,10 +1034,13 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 
 /**
  * should_drop_bucket - check if this is open_bucket should go away
+ * @ob:	open_bucket to predicate on
+ * @c:	filesystem handle
  * @ca:	if set, we're killing buckets for a particular device
  * @ec:	if true, we're shutting down erasure coding and killing all ec
  *	open_buckets
  *	otherwise, return true
+ * Returns: true if we should kill this open_bucket
  *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index bec62e5b21e5..82109585439b 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -351,7 +351,6 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter alloc_iter = { NULL };
-	struct bch_dev *ca;
 	struct bkey_s_c alloc_k;
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
@@ -363,8 +362,6 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
 		goto out;
 	}
 
-	ca = bch_dev_bkey_exists(c, k.k->p.inode);
-
 	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
 				     bp_pos_to_bucket(c, k.k->p), 0);
 	ret = bkey_err(alloc_k);
@@ -629,7 +626,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	enum btree_id btree_id;
-	struct bpos_level last_flushed = { UINT_MAX };
+	struct bpos_level last_flushed = { UINT_MAX, POS_MIN };
 	int ret = 0;
 
 	for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index e80fef1537c9..9fe3dac4a005 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -371,7 +371,7 @@ BCH_DEBUG_PARAMS()
 #undef BCH_DEBUG_PARAM
 
 #ifndef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAM(name, description) static const bool bch2_##name;
+#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
 BCH_DEBUG_PARAMS_DEBUG()
 #undef BCH_DEBUG_PARAM
 #endif
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index c434202f351a..f0d130440baa 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -83,8 +83,8 @@ typedef uuid_t __uuid_t;
 #endif
 
 #define BITMASK(name, type, field, offset, end) \
-static const unsigned name##_OFFSET = offset; \
-static const unsigned name##_BITS = (end - offset); \
+static const __maybe_unused unsigned name##_OFFSET = offset; \
+static const __maybe_unused unsigned name##_BITS = (end - offset); \
 \
 static inline __u64 name(const type *k) \
 { \
@@ -98,9 +98,9 @@ static inline void SET_##name(type *k, __u64 v) \
 }
 
 #define LE_BITMASK(_bits, name, type, field, offset, end) \
-static const unsigned name##_OFFSET = offset; \
-static const unsigned name##_BITS = (end - offset); \
-static const __u##_bits name##_MAX = (1ULL << (end - offset)) - 1; \
+static const __maybe_unused unsigned name##_OFFSET = offset; \
+static const __maybe_unused unsigned name##_BITS = (end - offset); \
+static const __maybe_unused __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;\
 \
 static inline __u64 name(const type *k) \
 { \
@@ -1668,7 +1668,8 @@ enum bcachefs_metadata_version {
 	bcachefs_metadata_version_max
 };
 
-static const unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
+static const __maybe_unused
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
 
 #define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
 
@@ -1975,7 +1976,7 @@ enum bch_csum_type {
 	BCH_CSUM_NR
 };
 
-static const unsigned bch_crc_bytes[] = {
+static const __maybe_unused unsigned bch_crc_bytes[] = {
 	[BCH_CSUM_none]			= 0,
 	[BCH_CSUM_crc32c_nonzero]	= 4,
 	[BCH_CSUM_crc32c]		= 4,
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index a3abd9d2d176..abdb05507d16 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -308,9 +308,14 @@ struct bpos __bkey_unpack_pos(const struct bkey_format *format,
 
 /**
  * bch2_bkey_pack_key -- pack just the key, not the value
+ * @out:	packed result
+ * @in:		key to pack
+ * @format:	format of packed result
+ *
+ * Returns: true on success, false on failure
  */
 bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
-		 const struct bkey_format *format)
+			const struct bkey_format *format)
 {
 	struct pack_state state = pack_state_init(format, out);
 	u64 *w = out->_data;
@@ -336,9 +341,12 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 
 /**
  * bch2_bkey_unpack -- unpack the key and the value
+ * @b:		btree node of @src key (for packed format)
+ * @dst:	unpacked result
+ * @src:	packed input
  */
 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
-		 const struct bkey_packed *src)
+		      const struct bkey_packed *src)
 {
 	__bkey_unpack_key(b, &dst->k, src);
 
@@ -349,19 +357,24 @@ void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
 
 /**
  * bch2_bkey_pack -- pack the key and the value
+ * @dst:	packed result
+ * @src:	unpacked input
+ * @format:	format of packed result
+ *
+ * Returns: true on success, false on failure
  */
-bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
-	       const struct bkey_format *format)
+bool bch2_bkey_pack(struct bkey_packed *dst, const struct bkey_i *src,
+		    const struct bkey_format *format)
 {
 	struct bkey_packed tmp;
 
-	if (!bch2_bkey_pack_key(&tmp, &in->k, format))
+	if (!bch2_bkey_pack_key(&tmp, &src->k, format))
 		return false;
 
-	memmove_u64s((u64 *) out + format->key_u64s,
-		     &in->v,
-		     bkey_val_u64s(&in->k));
-	memcpy_u64s_small(out, &tmp, format->key_u64s);
+	memmove_u64s((u64 *) dst + format->key_u64s,
+		     &src->v,
+		     bkey_val_u64s(&src->k));
+	memcpy_u64s_small(dst, &tmp, format->key_u64s);
 
 	return true;
 }
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 82f30ffbfb86..be9f012fc7be 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -369,7 +369,6 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 {
 	const struct bkey_ops *ops;
 	struct bkey uk;
-	struct bkey_s u;
 	unsigned nr_compat = 5;
 	int i;
 
@@ -434,7 +433,9 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 		}
 		break;
-	case 4:
+	case 4: {
+		struct bkey_s u;
+
 		if (!bkey_packed(k)) {
 			u = bkey_i_to_s(packed_to_bkey(k));
 		} else {
@@ -451,6 +452,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
 		if (ops->compat)
 			ops->compat(btree_id, version, big_endian, write, u);
 		break;
+	}
 	default:
 		BUG();
 	}
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 685792137d2a..cff7486ef446 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -172,10 +172,10 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
 		printk(KERN_ERR "iter was:");
 
 		btree_node_iter_for_each(_iter, set) {
-			struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
-			struct bset_tree *t = bch2_bkey_to_bset(b, k);
+			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
+			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
 			printk(" [%zi %zi]", t - b->set,
-			       k->_data - bset(b, t)->_data);
+			       k2->_data - bset(b, t)->_data);
 		}
 		panic("\n");
 	}
@@ -1269,9 +1269,13 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
 }
 
 /**
- * bch_btree_node_iter_init - initialize a btree node iterator, starting from a
+ * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
  * given position
  *
+ * @iter:	iterator to initialize
+ * @b:		btree node to search
+ * @search:	search key
+ *
  * Main entry point to the lookup code for individual btree nodes:
  *
  * NOTE:
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 245ddd92b2d1..ef9492f7e937 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -885,7 +885,7 @@ retry:
 	}
 
 	if (unlikely(need_relock)) {
-		int ret = bch2_trans_relock(trans) ?:
+		ret = bch2_trans_relock(trans) ?:
 			bch2_btree_path_relock_intent(trans, path);
 		if (ret) {
 			six_unlock_type(&b->c.lock, lock_type);
@@ -916,11 +916,20 @@ retry:
 }
 
 /**
- * bch_btree_node_get - find a btree node in the cache and lock it, reading it
+ * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
+ * @trans:	btree transaction object
+ * @path:	btree_path being traversed
+ * @k:		pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
+ * @level:	level of btree node being looked up (0 == leaf node)
+ * @lock_type:	SIX_LOCK_read or SIX_LOCK_intent
+ * @trace_ip:	ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
+ *
 * The btree node will have either a read or a write lock held, depending on
 * the @write parameter.
+ *
+ * Returns: btree node or ERR_PTR()
  */
 struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
 				  const struct bkey_i *k, unsigned level,
@@ -979,7 +988,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 		 * relock it specifically:
 		 */
 		if (trans) {
-			int ret = bch2_trans_relock(trans) ?:
+			ret = bch2_trans_relock(trans) ?:
 				bch2_btree_path_relock_intent(trans, path);
 			if (ret) {
 				BUG_ON(!trans->restarted);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 3c8ffbbaef4f..9496ff16fc91 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -566,8 +566,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 			       struct bkey_s_c *k)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
-	const union bch_extent_entry *entry;
+	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
+	const union bch_extent_entry *entry_c;
 	struct extent_ptr_decoded p = { 0 };
 	bool do_update = false;
 	struct printbuf buf = PRINTBUF;
@@ -577,10 +577,10 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 	 * XXX
 	 * use check_bucket_ref here
 	 */
-	bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
+	bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
-		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
+		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
 
 		if (!g->gen_valid &&
 		    (c->opts.reconstruct_alloc ||
@@ -1217,14 +1217,6 @@ static int bch2_gc_done(struct bch_fs *c,
 			fsck_err(c, _msg ": got %llu, should be %llu" \
 				, ##__VA_ARGS__, dst->_f, src->_f))) \
 		dst->_f = src->_f
-#define copy_stripe_field(_f, _msg, ...) \
-	if (dst->_f != src->_f && \
-	    (!verify || \
-	     fsck_err(c, "stripe %zu has wrong "_msg \
-		      ": got %u, should be %u", \
-		      iter.pos, ##__VA_ARGS__, \
-		      dst->_f, src->_f))) \
-		dst->_f = src->_f
 #define copy_dev_field(_f, _msg, ...) \
 	copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
 #define copy_fs_field(_f, _msg, ...) \
@@ -1776,6 +1768,12 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
 /**
  * bch2_gc - walk _all_ references to buckets, and recompute them:
  *
+ * @c:			filesystem object
+ * @initial:		are we in recovery?
+ * @metadata_only:	are we just checking metadata references, or everything?
+ *
+ * Returns: 0 on success, or standard errcode on failure
+ *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
@@ -1985,11 +1983,9 @@ int bch2_gc_gens(struct bch_fs *c)
 
 	for (i = 0; i < BTREE_ID_NR; i++)
 		if (btree_type_has_ptrs(i)) {
-			struct btree_iter iter;
-			struct bkey_s_c k;
-
 			c->gc_gens_btree = i;
 			c->gc_gens_pos = POS_MIN;
+
 			ret = for_each_btree_key_commit(&trans, iter, i, POS_MIN,
 					BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 00f53cb5d44b..9fa9ed641300 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -336,7 +336,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
 	start_bset->journal_seq = cpu_to_le64(seq);
 
 	if (sorting_entire_node) {
-		unsigned u64s = le16_to_cpu(out->keys.u64s);
+		u64s = le16_to_cpu(out->keys.u64s);
 
 		BUG_ON(bytes != btree_bytes(c));
 
@@ -410,8 +410,6 @@ void bch2_btree_sort_into(struct bch_fs *c,
 	bch2_verify_btree_nr_keys(dst);
 }
 
-#define SORT_CRIT	(4096 / sizeof(u64))
-
 /*
  * We're about to add another bset to the btree node, so if there's currently
  * too many bsets - sort some of them together:
@@ -542,6 +540,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 	prt_str(out, ": ");
 }
 
+__printf(8, 9)
 static int __btree_err(int ret,
 		       struct bch_fs *c,
 		       struct bch_dev *ca,
@@ -622,9 +621,6 @@ __cold
 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 {
 	struct bset_tree *t;
-	struct bkey_s_c k;
-	struct bkey unpacked;
-	struct btree_node_iter iter;
 
 	for_each_bset(b, t) {
 		struct bset *i = bset(b, t);
@@ -660,6 +656,9 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 	bch2_bset_set_no_aux_tree(b, b->set);
 	bch2_btree_build_aux_trees(b);
 
+	struct bkey_s_c k;
+	struct bkey unpacked;
+	struct btree_node_iter iter;
 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
@@ -908,7 +907,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned blacklisted_written, nonblacklisted_written = 0;
 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
 	struct printbuf buf = PRINTBUF;
 	int ret = 0, retry_read = 0, write = READ;
@@ -1042,8 +1040,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		sort_iter_add(iter,
 			      vstruct_idx(i, 0),
 			      vstruct_last(i));
-
-		nonblacklisted_written = b->written;
 	}
 
 	if (ptr_written) {
@@ -1061,18 +1057,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 				    true),
 				 -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
 				 "found bset signature after last bset");
-
-		/*
-		 * Blacklisted bsets are those that were written after the most recent
-		 * (flush) journal write. Since there wasn't a flush, they may not have
-		 * made it to all devices - which means we shouldn't write new bsets
-		 * after them, as that could leave a gap and then reads from that device
-		 * wouldn't find all the bsets in that btree node - which means it's
-		 * important that we start writing new bsets after the most recent _non_
-		 * blacklisted bset:
-		 */
-		blacklisted_written = b->written;
-		b->written = nonblacklisted_written;
 	}
 
 	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
@@ -1140,9 +1124,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	btree_node_reset_sib_u64s(b);
 
 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
 
-		if (ca->mi.state != BCH_MEMBER_STATE_rw)
+		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
 			set_btree_node_need_rewrite(b);
 	}
 
@@ -1224,19 +1208,17 @@ start:
 		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
				       rb->start_time);
 	bio_put(&rb->bio);
-	printbuf_exit(&buf);
 
 	if (saw_error && !btree_node_read_error(b)) {
-		struct printbuf buf = PRINTBUF;
-
+		printbuf_reset(&buf);
 		bch2_bpos_to_text(&buf, b->key.k.p);
 		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
 			 __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
-		printbuf_exit(&buf);
 
 		bch2_btree_node_rewrite_async(c, b);
 	}
 
+	printbuf_exit(&buf);
 	clear_btree_node_read_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
 }
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 8d089bbdb1e5..6c064e82c0c8 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -488,7 +488,6 @@ fixup_done:
 	if (!bch2_btree_node_iter_end(node_iter) &&
 	    iter_current_key_modified &&
 	    b->c.level) {
-		struct bset_tree *t;
 		struct bkey_packed *k, *k2, *p;
 
 		k = bch2_btree_node_iter_peek_all(node_iter, b);
@@ -2048,8 +2047,12 @@ out:
 }
 
 /**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
+ * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
+ * iterator's current position
+ * @iter:	iterator to peek from
+ * @end:	search limit: returns keys less than or equal to @end
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
 {
@@ -2186,10 +2189,13 @@ end:
 }
 
 /**
- * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
- * to iterator's current position, returning keys from every level of the btree.
- * For keys at different levels of the btree that compare equal, the key from
- * the lower level (leaf) is returned first.
+ * bch2_btree_iter_peek_all_levels() - returns the first key greater than or
+ * equal to iterator's current position, returning keys from every level of the
+ * btree. For keys at different levels of the btree that compare equal, the key
+ * from the lower level (leaf) is returned first.
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 {
@@ -2280,8 +2286,11 @@ out_no_locked:
 }
 
 /**
- * bch2_btree_iter_next: returns first key greater than iterator's current
+ * bch2_btree_iter_next() - returns first key greater than iterator's current
  * position
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 {
@@ -2292,8 +2301,11 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 }
 
 /**
- * bch2_btree_iter_peek_prev: returns first key less than or equal to
+ * bch2_btree_iter_peek_prev() - returns first key less than or equal to
 * iterator's current position
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
