author		Christoph Hellwig <hch@lst.de>	2024-11-13 16:20:41 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-12-09 10:31:46 +0100
commit		519899291235673c731f94fbc48198871a3eb2e8 (patch)
tree		9dc6bcdfd957f66c4a628cfdccf3551f211b5731 /drivers
parent		77035e4d27e15f87ea55929c8bb8fb1970129e2f (diff)
download	linux-519899291235673c731f94fbc48198871a3eb2e8.tar.gz
		linux-519899291235673c731f94fbc48198871a3eb2e8.tar.bz2
		linux-519899291235673c731f94fbc48198871a3eb2e8.zip
nvme-pci: reverse request order in nvme_queue_rqs
[ Upstream commit beadf0088501d9dcf2454b05d90d5d31ea3ba55f ]

blk_mq_flush_plug_list submits requests in the reverse order that they
were submitted, which leads to a rather suboptimal I/O pattern
especially in rotational devices. Fix this by rewriting nvme_queue_rqs
so that it always pops the requests from the passed in request list,
and then adds them to the head of a local submit list. This actually
simplifies the code a bit as it removes the complicated list splicing,
at the cost of extra updates of the rq_next pointer. As that should be
cache hot anyway it should be an easy price to pay.

Fixes: d62cbcf62f2f ("nvme: add support for mq_ops->queue_rqs()")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241113152050.157179-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
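The core of the rewrite is a simple order-inverting list trick: the plug list handed to nvme_queue_rqs() arrives in reverse submission order, and popping each request off its head while pushing it onto the head of a local submit list reverses it once more, restoring the original order. The following is a minimal user-space sketch of that property, assuming nothing beyond standard C; struct rq, rq_pop() and rq_add() are hypothetical stand-ins for the kernel's struct request, rq_list_pop() and rq_list_add(), not the real block-layer API.

#include <stdio.h>

struct rq {
	int tag;		/* submission order, for demonstration only */
	struct rq *next;
};

/* Pop the first entry off the list head, analogous to rq_list_pop(). */
static struct rq *rq_pop(struct rq **list)
{
	struct rq *rq = *list;

	if (rq)
		*list = rq->next;
	return rq;
}

/* Push an entry onto the list head, analogous to rq_list_add(). */
static void rq_add(struct rq **list, struct rq *rq)
{
	rq->next = *list;
	*list = rq;
}

int main(void)
{
	struct rq rqs[4] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 }, { .tag = 3 } };
	struct rq *plug_list = NULL, *submit_list = NULL, *rq;
	int i;

	/* Head insertion leaves the plug list in reverse order: 3, 2, 1, 0. */
	for (i = 0; i < 4; i++)
		rq_add(&plug_list, &rqs[i]);

	/* Pop from the plug list and push onto a local submit list, which
	 * reverses the order back to 0, 1, 2, 3. */
	while ((rq = rq_pop(&plug_list)))
		rq_add(&submit_list, rq);

	for (rq = submit_list; rq; rq = rq->next)
		printf("submit tag %d\n", rq->tag);
	return 0;
}

This is the same pattern as the rq_list_add(&submit_list, req) call marked /* reverse order */ in the patch below: by the time nvme_submit_cmds() drains submit_list, the commands reach the submission queue in the order the requests were originally submitted.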
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/nvme/host/pci.c	39
1 files changed, 17 insertions, 22 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e0b502573b42..d525fa1229d7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -905,9 +905,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
 {
+	struct request *req;
+
 	spin_lock(&nvmeq->sq_lock);
-	while (!rq_list_empty(*rqlist)) {
-		struct request *req = rq_list_pop(rqlist);
+	while ((req = rq_list_pop(rqlist))) {
 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
 		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
@@ -933,31 +934,25 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
 
 static void nvme_queue_rqs(struct request **rqlist)
 {
-	struct request *req, *next, *prev = NULL;
+	struct request *submit_list = NULL;
 	struct request *requeue_list = NULL;
+	struct request **requeue_lastp = &requeue_list;
+	struct nvme_queue *nvmeq = NULL;
+	struct request *req;
 
-	rq_list_for_each_safe(rqlist, req, next) {
-		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-
-		if (!nvme_prep_rq_batch(nvmeq, req)) {
-			/* detach 'req' and add to remainder list */
-			rq_list_move(rqlist, &requeue_list, req, prev);
-
-			req = prev;
-			if (!req)
-				continue;
-		}
+	while ((req = rq_list_pop(rqlist))) {
+		if (nvmeq && nvmeq != req->mq_hctx->driver_data)
+			nvme_submit_cmds(nvmeq, &submit_list);
+		nvmeq = req->mq_hctx->driver_data;
 
-		if (!next || req->mq_hctx != next->mq_hctx) {
-			/* detach rest of list, and submit */
-			req->rq_next = NULL;
-			nvme_submit_cmds(nvmeq, rqlist);
-			*rqlist = next;
-			prev = NULL;
-		} else
-			prev = req;
+		if (nvme_prep_rq_batch(nvmeq, req))
+			rq_list_add(&submit_list, req); /* reverse order */
+		else
+			rq_list_add_tail(&requeue_lastp, req);
 	}
 
+	if (nvmeq)
+		nvme_submit_cmds(nvmeq, &submit_list);
 	*rqlist = requeue_list;
 }