-rw-r--r--  Documentation/block/biodoc.txt     |   4
-rw-r--r--  MAINTAINERS                        |   1
-rw-r--r--  block/Kconfig                      |   1
-rw-r--r--  block/bio.c                        |   5
-rw-r--r--  block/blk-core.c                   |  16
-rw-r--r--  block/blk-mq-sysfs.c               |  40
-rw-r--r--  block/blk-mq-tag.c                 | 503
-rw-r--r--  block/blk-mq-tag.h                 |  42
-rw-r--r--  block/blk-mq.c                     | 183
-rw-r--r--  block/blk-mq.h                     |  11
-rw-r--r--  block/blk-sysfs.c                  |   4
-rw-r--r--  block/cfq-iosched.c                |  13
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  |   2
-rw-r--r--  drivers/block/nbd.c                | 411
-rw-r--r--  drivers/block/null_blk.c           | 128
-rw-r--r--  drivers/lightnvm/Kconfig           |   2
-rw-r--r--  drivers/lightnvm/Makefile          |   2
-rw-r--r--  drivers/lightnvm/core.c            |  55
-rw-r--r--  drivers/lightnvm/lightnvm.h        |  35
-rw-r--r--  drivers/lightnvm/sysfs.c           | 198
-rw-r--r--  drivers/md/bcache/btree.c          |   6
-rw-r--r--  drivers/md/bcache/debug.c          |   6
-rw-r--r--  drivers/md/bcache/movinggc.c       |   5
-rw-r--r--  drivers/md/bcache/request.c        |   9
-rw-r--r--  drivers/md/bcache/writeback.c      |   5
-rw-r--r--  drivers/md/dm-crypt.c              |   2
-rw-r--r--  drivers/md/dm-log-writes.c         |   6
-rw-r--r--  drivers/md/dm-rq.c                 |   2
-rw-r--r--  drivers/md/raid1.c                 |   8
-rw-r--r--  drivers/nvme/host/core.c           | 155
-rw-r--r--  drivers/nvme/host/fabrics.c        |  25
-rw-r--r--  drivers/nvme/host/fabrics.h        |  11
-rw-r--r--  drivers/nvme/host/lightnvm.c       |  33
-rw-r--r--  drivers/nvme/host/nvme.h           |  30
-rw-r--r--  drivers/nvme/host/scsi.c           |  80
-rw-r--r--  drivers/nvme/target/admin-cmd.c    |  88
-rw-r--r--  drivers/nvme/target/io-cmd.c       |   3
-rw-r--r--  fs/befs/linuxvfs.c                 |   2
-rw-r--r--  fs/block_dev.c                     |  18
-rw-r--r--  fs/btrfs/inode.c                   |   5
-rw-r--r--  include/linux/bio.h                |   3
-rw-r--r--  include/linux/blk-cgroup.h         |   2
-rw-r--r--  include/linux/blk-mq.h             |  28
-rw-r--r--  include/linux/blk_types.h          |  21
-rw-r--r--  include/linux/blkdev.h             |   4
-rw-r--r--  include/linux/ioprio.h             |   1
-rw-r--r--  include/linux/lightnvm.h           |  18
-rw-r--r--  include/linux/sbitmap.h            | 373
-rw-r--r--  include/linux/workqueue.h          |   1
-rw-r--r--  kernel/workqueue.c                 |  40
-rw-r--r--  lib/Kconfig                        |   3
-rw-r--r--  lib/Makefile                       |   2
-rw-r--r--  lib/sbitmap.c                      | 347
53 files changed, 1828 insertions(+), 1170 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index bcdb2b4c1f12..918e1e0d0e78 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -115,7 +115,7 @@ i. Per-queue limits/values exported to the generic layer by the driver
Various parameters that the generic i/o scheduler logic uses are set at
a per-queue level (e.g maximum request size, maximum number of segments in
-a scatter-gather list, hardsect size)
+a scatter-gather list, logical block size)
Some parameters that were earlier available as global arrays indexed by
major/minor are now directly associated with the queue. Some of these may
@@ -156,7 +156,7 @@ Some new queue property settings:
blk_queue_max_segment_size(q, max_seg_size)
Maximum size of a clustered segment, 64kB default.
- blk_queue_hardsect_size(q, hardsect_size)
+ blk_queue_logical_block_size(q, logical_block_size)
Lowest possible sector size that the hardware can operate
on, 512 bytes default.
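(Editorial aside, not part of the patch: a hedged sketch of how a driver might apply the two settings documented above at queue-setup time. The two helpers are the ones named in this hunk; the driver function name and the 4096-byte size are illustrative assumptions.)

static void my_drv_init_queue(struct request_queue *q)
{
	/* 64kB clustered segments, the documented default */
	blk_queue_max_segment_size(q, 64 * 1024);
	/* a device with 4096-byte logical blocks; 512 bytes is the documented default */
	blk_queue_logical_block_size(q, 4096);
}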
diff --git a/MAINTAINERS b/MAINTAINERS
index 255655880881..fb4d381a6ecf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2472,6 +2472,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: block/
F: kernel/trace/blktrace.c
+F: lib/sbitmap.c
BLOCK2MTD DRIVER
M: Joern Engel <joern@lazybastard.org>
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..5136ad4bb6d5 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select SBITMAP
help
Provide block layer support for the kernel.
diff --git a/block/bio.c b/block/bio.c
index aa7354088008..db85c5753a76 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1068,7 +1068,7 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
return 0;
}
-static void bio_free_pages(struct bio *bio)
+void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
int i;
@@ -1076,6 +1076,7 @@ static void bio_free_pages(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i)
__free_page(bvec->bv_page);
}
+EXPORT_SYMBOL(bio_free_pages);
/**
* bio_uncopy_user - finish previously mapped bio
@@ -1274,7 +1275,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
nr_pages += end - start;
/*
- * buffer must be aligned to at least hardsector size for now
+ * buffer must be aligned to at least logical block size for now
*/
if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
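(Editorial aside: with bio_free_pages() exported above, a caller that attached privately allocated pages to a bio can release them in its completion handler rather than open-coding the bio_for_each_segment_all() loop. A minimal sketch; the handler name is an illustrative assumption.)

static void my_end_io(struct bio *bio)
{
	bio_free_pages(bio);	/* __free_page() on every bvec page */
	bio_put(bio);		/* drop the submitter's reference */
}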
diff --git a/block/blk-core.c b/block/blk-core.c
index 36c7ac328d8c..14d7c0740dc0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- cancel_delayed_work_sync(&hctx->run_work);
+ cancel_work_sync(&hctx->run_work);
cancel_delayed_work_sync(&hctx->delay_work);
}
} else {
@@ -3097,6 +3097,12 @@ int kblockd_schedule_work(struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
+int kblockd_schedule_work_on(int cpu, struct work_struct *work)
+{
+ return queue_work_on(cpu, kblockd_workqueue, work);
+}
+EXPORT_SYMBOL(kblockd_schedule_work_on);
+
int kblockd_schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
@@ -3301,19 +3307,23 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_plug *plug;
long state;
+ unsigned int queue_num;
+ struct blk_mq_hw_ctx *hctx;
if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;
+ queue_num = blk_qc_t_to_queue_num(cookie);
+ hctx = q->queue_hw_ctx[queue_num];
+ hctx->poll_considered++;
+
plug = current->plug;
if (plug)
blk_flush_plug_list(plug, false);
state = current->state;
while (!need_resched()) {
- unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
- struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
int ret;
hctx->poll_invoked++;
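(Editorial aside: the new kblockd_schedule_work_on() added above mirrors kblockd_schedule_work() but pins the work item to a given CPU. A minimal, assumed usage sketch, relying on hctx->run_work having become a plain work_struct per the cancel_work_sync() change earlier in this diff.)

static void my_run_hctx_on(struct blk_mq_hw_ctx *hctx, int cpu)
{
	/* run this hardware queue's work on a specific CPU */
	kblockd_schedule_work_on(cpu, &hctx->run_work);
}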
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index fe822aa5b8e4..01fb455d3377 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -176,7 +176,17 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
- return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
+ return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
+ hctx->poll_considered, hctx->poll_invoked,
+ hctx->poll_success);
+}
+
+static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
+ const char *page, size_t size)
+{
+ hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
+
+ return size;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
@@ -198,12 +208,14 @@ static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
- for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
- unsigned long d = 1U << (i - 1);
+ for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
+ unsigned int d = 1U << (i - 1);
- page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
+ page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
}
+ page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
+ hctx->dispatched[i]);
return page - start_page;
}
@@ -301,8 +313,9 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
- .attr = {.name = "io_poll", .mode = S_IRUGO },
+ .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
+ .store = blk_mq_hw_sysfs_poll_store,
};
static struct attribute *default_hw_ctx_attrs[] = {
@@ -380,9 +393,8 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
-static void __blk_mq_unregister_disk(struct gendisk *disk)
+static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
- struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int i, j;
@@ -400,15 +412,15 @@ static void __blk_mq_unregister_disk(struct gendisk *disk)
kobject_del(&q->mq_kobj);
kobject_put(&q->mq_kobj);
- kobject_put(&disk_to_dev(disk)->kobj);
+ kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
}
-void blk_mq_unregister_disk(struct gendisk *disk)
+void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
blk_mq_disable_hotplug();
- __blk_mq_unregister_disk(disk);
+ __blk_mq_unregister_dev(dev, q);
blk_mq_enable_hotplug();
}
@@ -430,10 +442,8 @@ static void blk_mq_sysfs_init(struct request_queue *q)
}
}
-int blk_mq_register_disk(struct gendisk *disk)
+int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
- struct device *dev = disk_to_dev(disk);
- struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
int ret, i;
@@ -454,7 +464,7 @@ int blk_mq_register_disk(struct gendisk *disk)
}
if (ret)
- __blk_mq_unregister_disk(disk);
+ __blk_mq_unregister_dev(dev, q);
else
q->mq_sysfs_init_done = true;
out:
@@ -462,7 +472,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(blk_mq_register_disk);
+EXPORT_SYMBOL_GPL(blk_mq_register_dev);
void blk_mq_sysfs_unregister(struct request_queue *q)
{
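(Editorial aside before the blk-mq-tag.c hunks: the rework below reduces tag allocate/free to sbitmap_queue primitives. A minimal sketch of the resulting pattern, using only calls visible in this diff; the wrapper names are illustrative assumptions, not kernel API.)

static int my_get_tag(struct sbitmap_queue *bt)
{
	return __sbitmap_queue_get(bt);		/* returns -1 if no bit is free */
}

static void my_put_tag(struct sbitmap_queue *bt, unsigned int tag, int cpu)
{
	/* clears the bit and batches waiter wakeups, replacing the old
	 * bt_clear_tag()/bt_wake_ptr() machinery removed below */
	sbitmap_queue_clear(bt, tag, cpu);
}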
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 729bac3a673b..cef618f6fc92 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,58 +1,24 @@
/*
- * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
- * over multiple cachelines to avoid ping-pong between multiple submitters
- * or submitter and completer. Uses rolling wakeups to avoid falling of
- * the scaling cliff when we run out of tags and have to start putting
- * submitters to sleep.
- *
- * Uses active queue tracking to support fairer distribution of tags
- * between multiple submitters when a shared tag map is used.
+ * Tag allocation using scalable bitmaps. Uses active queue tracking to support
+ * fairer distribution of tags between multiple submitters when a shared tag map
+ * is used.
*
* Copyright (C) 2013-2014 Jens Axboe
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/random.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
-static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
-{
- int i;
-
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
- int ret;
-
- ret = find_first_zero_bit(&bm->word, bm->depth);
- if (ret < bm->depth)
- return true;
- }
-
- return false;
-}
-
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
if (!tags)
return true;
- return bt_has_free_tags(&tags->bitmap_tags);
-}
-
-static inline int bt_index_inc(int index)
-{
- return (index + 1) & (BT_WAIT_QUEUES - 1);
-}
-
-static inline void bt_index_atomic_inc(atomic_t *index)
-{
- int old = atomic_read(index);
- int new = bt_index_inc(old);
- atomic_cmpxchg(index, old, new);
+ return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}
/*
@@ -72,29 +38,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
*/
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
- struct blk_mq_bitmap_tags *bt;
- int i, wake_index;
-
- /*
- * Make sure all changes prior to this are visible from other CPUs.
- */
- smp_mb();
- bt = &tags->bitmap_tags;
- wake_index = atomic_read(&bt->wake_index);
- for (i = 0; i < BT_WAIT_QUEUES; i++) {
- struct bt_wait_state *bs = &bt->bs[wake_index];
-
- if (waitqueue_active(&bs->wait))
- wake_up(&bs->wait);
-
- wake_index = bt_index_inc(wake_index);
- }
-
- if (include_reserve) {
- bt = &tags->breserved_tags;
- if (waitqueue_active(&bt->bs[0].wait))
- wake_up(&bt->bs[0].wait);
- }
+ sbitmap_queue_wake_all(&tags->bitmap_tags);
+ if (include_reserve)
+ sbitmap_queue_wake_all(&tags->breserved_tags);
}
/*
@@ -118,7 +64,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
* and attempt to provide a fair share of the tag depth for each of them.
*/
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_bitmap_tags *bt)
+ struct sbitmap_queue *bt)
{
unsigned int depth, users;
@@ -130,7 +76,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
/*
* Don't try dividing an ant
*/
- if (bt->depth == 1)
+ if (bt->sb.depth == 1)
return true;
users = atomic_read(&hctx->tags->active_queues);
@@ -140,142 +86,36 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
/*
* Allow at least some tags
*/
- depth = max((bt->depth + users - 1) / users, 4U);
+ depth = max((bt->sb.depth + users - 1) / users, 4U);
return atomic_read(&hctx->nr_active) < depth;
}
-static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
- bool nowrap)
-{
- int tag, org_last_tag = last_tag;
-
- while (1) {
- tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
- if (unlikely(tag >= bm->depth)) {
- /*
- * We started with an offset, and we didn't reset the
- * offset to 0 in a failure case, so start from 0 to
- * exhaust the map.
- */
- if (org_last_tag && last_tag && !nowrap) {
- last_tag = org_last_tag = 0;
- continue;
- }
- return -1;
- }
-
- if (!test_and_set_bit(tag, &bm->word))
- break;
-
- last_tag = tag + 1;
- if (last_tag >= bm->depth - 1)
- last_tag = 0;
- }
-
- return tag;
-}
-
-#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
-
-/*
- * Straight forward bitmap tag implementation, where each bit is a tag
- * (cleared == free, and set == busy). The small twist is using per-cpu
- * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
- * contexts. This enables us to drastically limit the space searched,
- * without dirtying an extra shared cacheline like we would if we stored
- * the cache value inside the shared blk_mq_bitmap_tags structure. On top
- * of that, each word of tags is in a separate cacheline. This means that
- * multiple users will tend to stick to different cachelines, at least
- * until the map is exhausted.
- */
-static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
- unsigned int *tag_cache, struct blk_mq_tags *tags)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
- unsigned int last_tag, org_last_tag;
- int index, i, tag;
-
if (!hctx_may_queue(hctx, bt))
return -1;
-
- last_tag = org_last_tag = *tag_cache;
- index = TAG_TO_INDEX(bt, last_tag);
-
- for (i = 0; i < bt->map_nr; i++) {
- tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
- BT_ALLOC_RR(tags));
- if (tag != -1) {
- tag += (index << bt->bits_per_word);
- goto done;
- }
-
- /*
- * Jump to next index, and reset the last tag to be the
- * first tag of that index
- */
- index++;
- last_tag = (index << bt->bits_per_word);
-
- if (index >= bt->map_nr) {
- index = 0;
- last_tag = 0;
- }
- }
-
- *tag_cache = 0;
- return -1;
-
- /*
- * Only update the cache from the allocation path, if we ended
- * up using the specific cached tag.
- */
-done:
- if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
- last_tag = tag + 1;
- if (last_tag >= bt->depth - 1)
- last_tag = 0;
-
- *tag_cache = last_tag;
- }
-
- return tag;
+ return __sbitmap_queue_get(bt);
}
-static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
- struct blk_mq_hw_ctx *hctx)
+static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
+ struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
- struct bt_wait_state *bs;
- int wait_index;
-
- if (!hctx)
- return &bt->bs[0];
-
- wait_index = atomic_read(&hctx->wait_index);
- bs = &bt->bs[wait_index];
- bt_index_atomic_inc(&hctx->wait_index);
- return bs;
-}
-
-static int bt_get(struct blk_mq_alloc_data *data,
- struct blk_mq_bitmap_tags *bt,
- struct blk_mq_hw_ctx *hctx,
- unsigned int *last_tag, struct blk_mq_tags *tags)
-{
- struct bt_wait_state *bs;
+ struct sbq_wait_state *ws;
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
return tag;
if (data->flags & BLK_MQ_REQ_NOWAIT)
return -1;
- bs = bt_wait_ptr(bt, hctx);
+ ws = bt_wait_ptr(bt, hctx);
do {
- prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
@@ -292,7 +132,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
@@ -306,15 +146,14 @@ static int bt_get(struct blk_mq_alloc_data *data,
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
- last_tag = &data->ctx->last_tag;
hctx = data->hctx;
bt = &hctx->tags->bitmap_tags;
}
- finish_wait(&bs->wait, &wait);
- bs = bt_wait_ptr(bt, hctx);
+ finish_wait(&ws->wait, &wait);
+ ws = bt_wait_ptr(bt, hctx);
} while (1);
- finish_wait(&bs->wait, &wait);
+ finish_wait(&ws->wait, &wait);
return tag;
}
@@ -323,7 +162,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
- &data->ctx->last_tag, data->hctx->tags);
+ data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
@@ -332,15 +171,15 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
- int tag, zero = 0;
+ int tag;
if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
- tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
- data->hctx->tags);
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
+ data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
@@ -354,55 +193,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
return __blk_mq_get_tag(data);
}
-static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
-{
- int i, wake_index;
-
- wake_index = atomic_read(&bt->wake_index);
- for (i = 0; i < BT_WAIT_QUEUES; i++) {
- struct bt_wait_state *bs = &bt->bs[wake_index];
-
- if (waitqueue_active(&bs->wait)) {
- int o = atomic_read(&bt->wake_index);
- if (wake_index != o)
- atomic_cmpxchg(&bt->wake_index, o, wake_index);
-
- return bs;
- }
-
- wake_index = bt_index_inc(wake_index);
- }
-
- return NULL;
-}
-
-static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
-{
- const int index = TAG_TO_INDEX(bt, tag);
- struct bt_wait_state *bs;
- int wait_cnt;
-
- clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
-
- /* Ensure that the wait list checks occur after clear_bit(). */
- smp_mb();
-
- bs = bt_wake_ptr(bt);
- if (!bs)
- return;
-
- wait_cnt = atomic_dec_return(&bs->wait_cnt);
- if (unlikely(wait_cnt < 0))
- wait_cnt = atomic_inc_return(&bs->wait_cnt);
- if (wait_cnt == 0) {
- atomic_add(bt->wake_cnt, &bs->wait_cnt);
- bt_index_atomic_inc(&bt->wake_index);
- wake_up(&bs->wait);
- }
-}
-
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
- unsigned int *last_tag)
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ unsigned int tag)
{
struct blk_mq_tags *tags = hctx->tags;
@@ -410,67 +202,92 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
const int real_tag = tag - tags->nr_reserved_tags;
BUG_ON(real_tag >= tags->nr_tags);
- bt_clear_tag(&tags->bitmap_tags, real_tag);
- if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
- *last_tag = real_tag;
+ sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
- bt_clear_tag(&tags->breserved_tags, tag);
+ sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
}
}
-static void bt_for_each(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_bitmap_tags *bt, unsigned int off,
- busy_iter_fn *fn, void *data, bool reserved)
+struct bt_iter_data {
+ struct blk_mq_hw_ctx *hctx;
+ busy_iter_fn *fn;
+ void *data;
+ bool reserved;
+};
+
+static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
+ struct bt_iter_data *iter_data = data;
+ struct blk_mq_hw_ctx *hctx = iter_data->hctx;
+ struct blk_mq_tags *tags = hctx->tags;
+ bool reserved = iter_data->reserved;
struct request *rq;
- int bit, i;
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
+ if (!reserved)
+ bitnr += tags->nr_reserved_tags;
+ rq = tags->rqs[bitnr];
- for (bit = find_first_bit(&bm->word, bm->depth);
- bit < bm->depth;
- bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
- rq = hctx->tags->rqs[off + bit];
- if (rq->q == hctx->queue)
- fn(hctx, rq, data, reserved);
- }
+ if (rq->q == hctx->queue)
+ iter_data->fn(hctx, rq, iter_data->data, reserved);
+ return true;
+}
- off += (1 << bt->bits_per_word);
- }
+static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
+ busy_iter_fn *fn, void *data, bool reserved)
+{
+ struct bt_iter_data iter_data = {
+ .hctx = hctx,
+ .fn = fn,
+ .data = data,
+ .reserved = reserved,
+ };
+
+ sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
-static void bt_tags_for_each(struct blk_mq_tags *tags,
- struct blk_mq_bitmap_tags *bt, unsigned int off,
- busy_tag_iter_fn *fn, void *data, bool reserved)
+struct bt_tags_iter_data {
+ struct blk_mq_tags *tags;
+ busy_tag_iter_fn *fn;
+ void *data;
+ bool reserved;
+};
+
+static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
+ struct bt_tags_iter_data *iter_data = data;
+ struct blk_mq_tags *tags = iter_data->tags;
+ bool reserved = iter_data->reserved;
struct request *rq;
- int bit, i;
- if (!tags->rqs)
- return;
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
-
- for (bit = find_first_bit(&bm->word, bm->depth);
- bit < bm->depth;
- bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
- rq = tags->rqs[off + bit];
- fn(rq, data, reserved);
- }
+ if (!reserved)
+ bitnr += tags->nr_reserved_tags;
+ rq = tags->rqs[bitnr];