| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-13 13:03:54 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-13 13:03:54 -0700 |
| commit | 0c9f4ac808b017a0013cee92a30de980550145d5 (patch) | |
| tree | 94eedbb9ef4815df9dc8d1dd6424fc92a2fbcd7a /drivers/md | |
| parent | 9961a785944601e32f185ea696347b22ffda634c (diff) | |
| parent | a3166c51702bb00b8f8b84022090cbab8f37be1a (diff) | |
Merge tag 'for-6.10/block-20240511' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
- Add a partscan attribute in sysfs, fixing an issue with systemd
relying on an internal interface that went away (a minimal read-back
sketch follows this list).
- Attempt #2 at making long-running discards interruptible. The
previous attempt went into 6.9, but we ended up mostly reverting it
as it had issues.
- Remove usage of the deprecated ida_simple API in bcache
- Support for zoned write plugging, greatly improving the performance
on zoned devices.
- Remove the old throttle low interface, which has been experimental
since 2017, never made it beyond that status, and isn't being used.
- Remove page->index debugging checks in brd, as they haven't caught
anything and this prepares us for removing page->index from struct
page.
- MD pull request from Song
- Don't schedule block workers on isolated CPUs
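
As a quick illustration of the partscan attribute mentioned above, here is a
hedged userspace sketch that reads it back. The /sys/block/<disk>/partscan
path and the 0/1 value format are assumptions based on the usual sysfs
layout for per-disk attributes, not copied from this series:

```c
#include <stdio.h>

int main(void)
{
        char buf[8];
        /* "sda" is a placeholder; substitute the disk of interest. */
        FILE *f = fopen("/sys/block/sda/partscan", "r");

        if (!f)
                return 1;

        /* Expect "0" or "1": whether partition scanning is enabled. */
        if (fgets(buf, sizeof(buf), f))
                printf("partscan: %s", buf);

        fclose(f);
        return 0;
}
```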
* tag 'for-6.10/block-20240511' of git://git.kernel.dk/linux: (84 commits)
blk-throttle: delay initialization until configuration
blk-throttle: remove CONFIG_BLK_DEV_THROTTLING_LOW
block: fix that util can be greater than 100%
block: support to account io_ticks precisely
block: add plug while submitting IO
bcache: fix variable length array abuse in btree_iter
bcache: Remove usage of the deprecated ida_simple_xx() API
md: Revert "md: Fix overflow in is_mddev_idle"
blk-lib: check for kill signal in ioctl BLKDISCARD
block: add a bio_await_chain helper
block: add a blk_alloc_discard_bio helper
block: add a bio_chain_and_submit helper
block: move discard checks into the ioctl handler
block: remove the discard_granularity check in __blkdev_issue_discard
block/ioctl: prefer different overflow check
null_blk: Fix the WARNING: modpost: missing MODULE_DESCRIPTION()
block: fix and simplify blkdevparts= cmdline parsing
block: refine the EOF check in blkdev_iomap_begin
block: add a partscan sysfs attribute for disks
block: add a disk_has_partscan helper
...
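
A number of the drivers/md hunks in the diff further down mechanically replace
an open-coded bio_list_merge() + bio_list_init() pair with a single call. A
minimal sketch of what that combined helper amounts to, inferred from the
call-site conversions below rather than copied from the block headers (kernel
context assumed):

```c
#include <linux/bio.h>

/* Merge @src into @dst, then leave @src reinitialized (empty). */
static inline void bio_list_merge_init(struct bio_list *dst,
                                       struct bio_list *src)
{
        bio_list_merge(dst, src);
        bio_list_init(src);
}
```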
Diffstat (limited to 'drivers/md')
| -rw-r--r-- | drivers/md/bcache/bset.c | 44 |
| -rw-r--r-- | drivers/md/bcache/bset.h | 28 |
| -rw-r--r-- | drivers/md/bcache/btree.c | 40 |
| -rw-r--r-- | drivers/md/bcache/super.c | 15 |
| -rw-r--r-- | drivers/md/bcache/sysfs.c | 2 |
| -rw-r--r-- | drivers/md/bcache/writeback.c | 10 |
| -rw-r--r-- | drivers/md/dm-bio-prison-v2.c | 3 |
| -rw-r--r-- | drivers/md/dm-cache-target.c | 12 |
| -rw-r--r-- | drivers/md/dm-clone-target.c | 14 |
| -rw-r--r-- | drivers/md/dm-core.h | 2 |
| -rw-r--r-- | drivers/md/dm-era-target.c | 3 |
| -rw-r--r-- | drivers/md/dm-mpath.c | 3 |
| -rw-r--r-- | drivers/md/dm-table.c | 3 |
| -rw-r--r-- | drivers/md/dm-thin.c | 12 |
| -rw-r--r-- | drivers/md/dm-vdo/data-vio.c | 3 |
| -rw-r--r-- | drivers/md/dm-vdo/flush.c | 3 |
| -rw-r--r-- | drivers/md/dm-zone.c | 501 |
| -rw-r--r-- | drivers/md/dm.c | 72 |
| -rw-r--r-- | drivers/md/dm.h | 2 |
| -rw-r--r-- | drivers/md/md-bitmap.c | 6 |
| -rw-r--r-- | drivers/md/md.c | 7 |
| -rw-r--r-- | drivers/md/md.h | 3 |
| -rw-r--r-- | drivers/md/raid5.c | 15 |
23 files changed, 242 insertions, 561 deletions
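
The first hunks below (bset.c and bset.h) convert struct btree_iter to end in
a flexible array member and introduce a fixed-size struct btree_iter_stack
wrapper for on-stack users. Here is a compilable sketch of that pattern, with
the type names copied from the bset.h hunk and everything else abridged; the
MAX_BSETS value and the shortened field layout are assumptions for
illustration only:

```c
#include <stddef.h>

#define MAX_BSETS 4U            /* assumed value, for illustration only */

struct bkey;                    /* opaque here */

struct btree_iter_set {
        struct bkey *k, *end;
};

/* Heap users can now size data[] at runtime instead of always paying
 * for MAX_BSETS entries. */
struct btree_iter {
        size_t size, used;
        struct btree_iter_set data[];   /* was data[MAX_BSETS] before this series */
};

/* Fixed-size wrapper that is safe to put on the stack: stack_data[]
 * provides backing storage directly after the flexible array (embedding
 * a FAM-terminated struct like this relies on a GCC/Clang extension the
 * kernel already uses). */
struct btree_iter_stack {
        struct btree_iter iter;
        struct btree_iter_set stack_data[MAX_BSETS];
};
```

Call sites then declare a struct btree_iter_stack and pass &iter.iter (or use
the updated for_each_key() macros), which is the mechanical change repeated
throughout the bcache hunks below.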
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 2bba4d6aaaa2..463eb13bd0b2 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b) int __bch_count_data(struct btree_keys *b) { unsigned int ret = 0; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k; if (b->ops->is_extents) @@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) { va_list args; struct bkey *k, *p = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; const char *err; for_each_key(b, k, &iter) { @@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, unsigned int status = BTREE_INSERT_STATUS_NO_INSERT; struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey preceding_key_on_stack = ZERO_KEY; struct bkey *preceding_key_p = &preceding_key_on_stack; @@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, else preceding_key(k, &preceding_key_p); - m = bch_btree_iter_init(b, &iter, preceding_key_p); + m = bch_btree_iter_stack_init(b, &iter, preceding_key_p); - if (b->ops->insert_fixup(b, k, &iter, replace_key)) + if (b->ops->insert_fixup(b, k, &iter.iter, replace_key)) return status; status = BTREE_INSERT_STATUS_INSERT; @@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, btree_iter_cmp)); } -static struct bkey *__bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, - struct bkey *search, - struct bset_tree *start) +static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, + struct bkey *search, + struct bset_tree *start) { struct bkey *ret = NULL; - iter->size = ARRAY_SIZE(iter->data); - iter->used = 0; + iter->iter.size = ARRAY_SIZE(iter->stack_data); + iter->iter.used = 0; #ifdef CONFIG_BCACHE_DEBUG - iter->b = b; + iter->iter.b = b; #endif for (; start <= bset_tree_last(b); start++) { ret = bch_bset_search(b, start, search); - bch_btree_iter_push(iter, ret, bset_bkey_last(start->data)); + bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data)); } return ret; } -struct bkey *bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, +struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, struct bkey *search) { - return __bch_btree_iter_init(b, iter, search, b->set); + return __bch_btree_iter_stack_init(b, iter, search, b->set); } static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, @@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, struct bset_sort_state *state) { size_t order = b->page_order, keys = 0; - struct btree_iter iter; + struct btree_iter_stack iter; int oldsize = bch_count_data(b); - __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); + __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]); if (start) { unsigned int i; @@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, order = get_order(__set_bytes(b->set->data, keys)); } - __btree_sort(b, &iter, start, order, false, state); + __btree_sort(b, &iter.iter, start, order, false, state); EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize); } @@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, struct bset_sort_state *state) { uint64_t start_time = 
local_clock(); - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(b, &iter, NULL); + bch_btree_iter_stack_init(b, &iter, NULL); - btree_mergesort(b, new->set->data, &iter, false, true); + btree_mergesort(b, new->set->data, &iter.iter, false, true); bch_time_stats_update(&state->time, start_time); diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index d795c84246b0..011f6062c4c0 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -321,7 +321,14 @@ struct btree_iter { #endif struct btree_iter_set { struct bkey *k, *end; - } data[MAX_BSETS]; + } data[]; +}; + +/* Fixed-size btree_iter that can be allocated on the stack */ + +struct btree_iter_stack { + struct btree_iter iter; + struct btree_iter_set stack_data[MAX_BSETS]; }; typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k); @@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, struct bkey *end); -struct bkey *bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, - struct bkey *search); +struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, + struct bkey *search); struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, const struct bkey *search); @@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b, return search ? __bch_bset_search(b, t, search) : t->data->start; } -#define for_each_key_filter(b, k, iter, filter) \ - for (bch_btree_iter_init((b), (iter), NULL); \ - ((k) = bch_btree_iter_next_filter((iter), (b), filter));) +#define for_each_key_filter(b, k, stack_iter, filter) \ + for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ + ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \ + filter));) -#define for_each_key(b, k, iter) \ - for (bch_btree_iter_init((b), (iter), NULL); \ - ((k) = bch_btree_iter_next(iter));) +#define for_each_key(b, k, stack_iter) \ + for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ + ((k) = bch_btree_iter_next(&((stack_iter)->iter)));) /* Sorting */ diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 196cdacce38f..d011a7154d33 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) uint8_t stale = 0; unsigned int keys = 0, good_keys = 0; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; struct bset_tree *t; gc->nodes++; @@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, static unsigned int btree_gc_count_keys(struct btree *b) { struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; unsigned int ret = 0; for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) @@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, int ret = 0; bool should_rewrite; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; struct gc_merge_info r[GC_MERGE_NODES]; struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; - bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); + bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); for (i = r; i < r + ARRAY_SIZE(r); i++) i->b = ERR_PTR(-EINTR); while (1) { - k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); + k = bch_btree_iter_next_filter(&iter.iter, &b->keys, + bch_ptr_bad); if (k) { r->b = 
bch_btree_node_get(b->c, op, k, b->level - 1, true, b); @@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) { int ret = 0; struct bkey *k, *p = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) bch_initial_mark_key(b->c, b->level, k); @@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) bch_initial_mark_key(b->c, b->level + 1, &b->key); if (b->level) { - bch_btree_iter_init(&b->keys, &iter, NULL); + bch_btree_iter_stack_init(&b->keys, &iter, NULL); do { - k = bch_btree_iter_next_filter(&iter, &b->keys, + k = bch_btree_iter_next_filter(&iter.iter, &b->keys, bch_ptr_bad); if (k) { btree_node_prefetch(b, k); @@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg) struct btree_check_info *info = arg; struct btree_check_state *check_state = info->state; struct cache_set *c = check_state->c; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k, *p; int cur_idx, prev_idx, skip_nr; @@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg) ret = 0; /* root node keys are checked before thread created */ - bch_btree_iter_init(&c->root->keys, &iter, NULL); - k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); BUG_ON(!k); p = k; @@ -1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg) skip_nr = cur_idx - prev_idx; while (skip_nr) { - k = bch_btree_iter_next_filter(&iter, + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); if (k) @@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c) int ret = 0; int i; struct bkey *k = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct btree_check_state check_state; /* check and mark root node keys */ @@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, if (b->level) { struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(&b->keys, &iter, from); + bch_btree_iter_stack_init(&b->keys, &iter, from); - while ((k = bch_btree_iter_next_filter(&iter, &b->keys, + while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, bch_ptr_bad))) { ret = bcache_btree(map_nodes_recurse, k, b, op, from, fn, flags); @@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, { int ret = MAP_CONTINUE; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(&b->keys, &iter, from); + bch_btree_iter_stack_init(&b->keys, &iter, from); - while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { + while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, + bch_ptr_bad))) { ret = !b->level ? 
fn(op, b, k) : bcache_btree(map_keys_recurse, k, diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 330bcd9ea4a9..cba09660148a 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -881,8 +881,8 @@ static void bcache_device_free(struct bcache_device *d) bcache_device_detach(d); if (disk) { - ida_simple_remove(&bcache_device_idx, - first_minor_to_idx(disk->first_minor)); + ida_free(&bcache_device_idx, + first_minor_to_idx(disk->first_minor)); put_disk(disk); } @@ -940,8 +940,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, if (!d->full_dirty_stripes) goto out_free_stripe_sectors_dirty; - idx = ida_simple_get(&bcache_device_idx, 0, - BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); + idx = ida_alloc_max(&bcache_device_idx, BCACHE_DEVICE_IDX_MAX - 1, + GFP_KERNEL); if (idx < 0) goto out_free_full_dirty_stripes; @@ -986,7 +986,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, out_bioset_exit: bioset_exit(&d->bio_split); out_ida_remove: - ida_simple_remove(&bcache_device_idx, idx); + ida_free(&bcache_device_idx, idx); out_free_full_dirty_stripes: kvfree(d->full_dirty_stripes); out_free_stripe_sectors_dirty: @@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) INIT_LIST_HEAD(&c->btree_cache_freed); INIT_LIST_HEAD(&c->data_buckets); - iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) * - sizeof(struct btree_iter_set); + iter_size = sizeof(struct btree_iter) + + ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) * + sizeof(struct btree_iter_set); c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); if (!c->devices) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 6956beb55326..826b14cae4e5 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c) unsigned int bytes = 0; struct bkey *k; struct btree *b; - struct btree_iter iter; + struct btree_iter_stack iter; goto lock_root; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 8827a6f130ad..792e070ccf38 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg) struct dirty_init_thrd_info *info = arg; struct bch_dirty_init_state *state = info->state; struct cache_set *c = state->c; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k, *p; int cur_idx, prev_idx, skip_nr; k = p = NULL; prev_idx = 0; - bch_btree_iter_init(&c->root->keys, &iter, NULL); - k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); BUG_ON(!k); p = k; @@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg) skip_nr = cur_idx - prev_idx; while (skip_nr) { - k = bch_btree_iter_next_filter(&iter, + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); if (k) @@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d) int i; struct btree *b = NULL; struct bkey *k = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct sectors_dirty_init op; struct cache_set *c = d->c; struct bch_dirty_init_state state; diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c index fd852981ef9c..cf433b0cf742 100644 --- a/drivers/md/dm-bio-prison-v2.c +++ 
b/drivers/md/dm-bio-prison-v2.c @@ -321,8 +321,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison, { BUG_ON(!cell->exclusive_lock); - bio_list_merge(bios, &cell->bios); - bio_list_init(&cell->bios); + bio_list_merge_init(bios, &cell->bios); if (cell->shared_count) { cell->exclusive_lock = false; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 911f73f7ebba..0fcbf8603846 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -115,8 +115,7 @@ static void __commit(struct work_struct *_ws) */ spin_lock_irq(&b->lock); list_splice_init(&b->work_items, &work_items); - bio_list_merge(&bios, &b->bios); - bio_list_init(&b->bios); + bio_list_merge_init(&bios, &b->bios); b->commit_scheduled = false; spin_unlock_irq(&b->lock); @@ -565,8 +564,7 @@ static void defer_bio(struct cache *cache, struct bio *bio) static void defer_bios(struct cache *cache, struct bio_list *bios) { spin_lock_irq(&cache->lock); - bio_list_merge(&cache->deferred_bios, bios); - bio_list_init(bios); + bio_list_merge_init(&cache->deferred_bios, bios); spin_unlock_irq(&cache->lock); wake_deferred_bio_worker(cache); @@ -1816,8 +1814,7 @@ static void process_deferred_bios(struct work_struct *ws) bio_list_init(&bios); spin_lock_irq(&cache->lock); - bio_list_merge(&bios, &cache->deferred_bios); - bio_list_init(&cache->deferred_bios); + bio_list_merge_init(&bios, &cache->deferred_bios); spin_unlock_irq(&cache->lock); while ((bio = bio_list_pop(&bios))) { @@ -1847,8 +1844,7 @@ static void requeue_deferred_bios(struct cache *cache) struct bio_list bios; bio_list_init(&bios); - bio_list_merge(&bios, &cache->deferred_bios); - bio_list_init(&cache->deferred_bios); + bio_list_merge_init(&bios, &cache->deferred_bios); while ((bio = bio_list_pop(&bios))) { bio->bi_status = BLK_STS_DM_REQUEUE; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 94b2fc33f64b..3f68672ab7c9 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -1181,8 +1181,7 @@ static void process_deferred_discards(struct clone *clone) struct bio_list discards = BIO_EMPTY_LIST; spin_lock_irq(&clone->lock); - bio_list_merge(&discards, &clone->deferred_discard_bios); - bio_list_init(&clone->deferred_discard_bios); + bio_list_merge_init(&discards, &clone->deferred_discard_bios); spin_unlock_irq(&clone->lock); if (bio_list_empty(&discards)) @@ -1215,8 +1214,7 @@ static void process_deferred_bios(struct clone *clone) struct bio_list bios = BIO_EMPTY_LIST; spin_lock_irq(&clone->lock); - bio_list_merge(&bios, &clone->deferred_bios); - bio_list_init(&clone->deferred_bios); + bio_list_merge_init(&bios, &clone->deferred_bios); spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios)) @@ -1237,11 +1235,9 @@ static void process_deferred_flush_bios(struct clone *clone) * before issuing them or signaling their completion. 
*/ spin_lock_irq(&clone->lock); - bio_list_merge(&bios, &clone->deferred_flush_bios); - bio_list_init(&clone->deferred_flush_bios); - - bio_list_merge(&bio_completions, &clone->deferred_flush_completions); - bio_list_init(&clone->deferred_flush_completions); + bio_list_merge_init(&bios, &clone->deferred_flush_bios); + bio_list_merge_init(&bio_completions, + &clone->deferred_flush_completions); spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index e6757a30dcca..08700bfc3e23 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -140,7 +140,7 @@ struct mapped_device { #ifdef CONFIG_BLK_DEV_ZONED unsigned int nr_zones; - unsigned int *zwp_offset; + void *zone_revalidate_map; #endif #ifdef CONFIG_IMA diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 6acfa5bf97a4..8f81e597858d 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1272,8 +1272,7 @@ static void process_deferred_bios(struct era *era) bio_list_init(&marked_bios); spin_lock(&era->deferred_lock); - bio_list_merge(&deferred_bios, &era->deferred_bios); - bio_list_init(&era->deferred_bios); + bio_list_merge_init(&deferred_bios, &era->deferred_bios); spin_unlock(&era->deferred_lock); if (bio_list_empty(&deferred_bios)) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 05d1328d1811..15b681b90153 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -704,8 +704,7 @@ static void process_queued_bios(struct work_struct *work) return; } - bio_list_merge(&bios, &m->queued_bios); - bio_list_init(&m->queued_bios); + bio_list_merge_init(&bios, &m->queued_bios); spin_unlock_irqrestore(&m->lock, flags); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 41f1d731ae5a..2c6fbd87363f 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -2042,7 +2042,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, r = dm_set_zones_restrictions(t, q); if (r) return r; - if (!static_key_enabled(&zoned_enabled.key)) + if (blk_queue_is_zoned(q) && + !static_key_enabled(&zoned_enabled.key)) static_branch_enable(&zoned_enabled); } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 4793ad2aa1f7..f359984c8ef2 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -592,12 +592,6 @@ struct dm_thin_endio_hook { struct dm_bio_prison_cell *cell; }; -static void __merge_bio_list(struct bio_list *bios, struct bio_list *master) -{ - bio_list_merge(bios, master); - bio_list_init(master); -} - static void error_bio_list(struct bio_list *bios, blk_status_t error) { struct bio *bio; @@ -616,7 +610,7 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, bio_list_init(&bios); spin_lock_irq(&tc->lock); - __merge_bio_list(&bios, master); + bio_list_merge_init(&bios, master); spin_unlock_irq(&tc->lock); error_bio_list(&bios, error); @@ -645,8 +639,8 @@ static void requeue_io(struct thin_c *tc) bio_list_init(&bios); spin_lock_irq(&tc->lock); - __merge_bio_list(&bios, &tc->deferred_bio_list); - __merge_bio_list(&bios, &tc->retry_on_resume_list); + bio_list_merge_init(&bios, &tc->deferred_bio_list); + bio_list_merge_init(&bios, &tc->retry_on_resume_list); spin_unlock_irq(&tc->lock); error_bio_list(&bios, BLK_STS_DM_REQUEUE); diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 94f6f1ccfb7d..ab3ea8337809 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ 
-604,8 +604,7 @@ static void assign_discard_permit(struct limiter *limiter) static void get_waiters(struct limiter *limiter) { - bio_list_merge(&limiter->waiters, &limiter->new_waiters); - bio_list_init(&limiter->new_waiters); + bio_list_merge_init(&limiter->waiters, &limiter->new_waiters); } static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool) diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index 57e87f0d7069..dd4fdee2ca0c 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -369,8 +369,7 @@ void vdo_dump_flusher(const struct flusher *flusher) static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo) { bio_list_init(&flush->bios); - bio_list_merge(&flush->bios, &vdo->flusher->waiting_flush_bios); - bio_list_init(&vdo->flusher->waiting_flush_bios); + bio_list_merge_init(&flush->bios, &vdo->flusher->waiting_flush_bios); } static void launch_flush(struct vdo_flush *flush) diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index eb9832b22b14..8e6bcb0d786a 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -60,16 +60,23 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector, struct dm_table *map; int srcu_idx, ret; - if (dm_suspended_md(md)) - return -EAGAIN; + if (!md->zone_revalidate_map) { + /* Regular user context */ + if (dm_suspended_md(md)) + return -EAGAIN; - map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; + map = dm_get_live_table(md, &srcu_idx); + if (!map) + return -EIO; + } else { + /* Zone revalidation during __bind() */ + map = md->zone_revalidate_map; + } ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); - dm_put_live_table(md, srcu_idx); + if (!md->zone_revalidate_map) + dm_put_live_table(md, srcu_idx); return ret; } @@ -138,80 +145,47 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) } } -void dm_cleanup_zoned_dev(struct mapped_device *md) +/* + * Count conventional zones of a mapped zoned device. If the device + * only has conventional zones, do not expose it as zoned. + */ +static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx, + void *data) { - if (md->disk) { - bitmap_free(md->disk->conv_zones_bitmap); - md->disk->conv_zones_bitmap = NULL; - bitmap_free(md->disk->seq_zones_wlock); - md->disk->seq_zones_wlock = NULL; - } + unsigned int *nr_conv_zones = data; - kvfree(md->zwp_offset); - md->zwp_offset = NULL; - md->nr_zones = 0; -} + if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) + (*nr_conv_zones)++; -static unsigned int dm_get_zone_wp_offset(struct blk_zone *zone) -{ - switch (zone->cond) { - case BLK_ZONE_COND_IMP_OPEN: - case BLK_ZONE_COND_EXP_OPEN: - case BLK_ZONE_COND_CLOSED: - return zone->wp - zone->start; - case BLK_ZONE_COND_FULL: - return zone->len; - case BLK_ZONE_COND_EMPTY: - case BLK_ZONE_COND_NOT_WP: - case BLK_ZONE_COND_OFFLINE: - case BLK_ZONE_COND_READONLY: - default: - /* - * Conventional, offline and read-only zones do not have a valid - * write pointer. Use 0 as for an empty zone. 
- */ - return 0; - } + return 0; } -static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx, - void *data) +static int dm_check_zoned(struct mapped_device *md, struct dm_table *t) { - struct mapped_device *md = data; struct gendisk *disk = md->disk; + unsigned int nr_conv_zones = 0; + int ret; - switch (zone->type) { - case BLK_ZONE_TYPE_CONVENTIONAL: - if (!disk->conv_zones_bitmap) { - disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones, - GFP_NOIO); - if (!disk->conv_zones_bitmap) - return -ENOMEM; - } - set_bit(idx, disk->conv_zones_bitmap); - break; - case BLK_ZONE_TYPE_SEQWRITE_REQ: - case BLK_ZONE_TYPE_SEQWRITE_PREF: - if (!disk->seq_zones_wlock) { - disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones, - GFP_NOIO); - if (!disk->seq_zones_wlock) - return -ENOMEM; - } - if (!md->zwp_offset) { - md->zwp_offset = - kvcalloc(disk->nr_zones, sizeof(unsigned int), - GFP_KERNEL); - if (!md->zwp_offset) - return -ENOMEM; - } - md->zwp_offset[idx] = dm_get_zone_wp_offset(zone); - - break; - default: - DMERR("Invalid zone type 0x%x at sectors %llu", - (int)zone->type, zone->start); - return -ENODEV; + /* Count conventional zones */ + md->zone_revalidate_map = t; + ret = dm_blk_report_zones(disk, 0, UINT_MAX, + dm_check_zoned_cb, &nr_conv_z |
