author		Linus Torvalds <torvalds@linux-foundation.org>	2025-03-14 11:22:05 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-03-14 11:22:05 -1000
commit	580b2032359d978418e5f7178511aef512229345 (patch)
tree	e769059200e3c649d488984364fb1886791f67f3 /include
parent	83158b21ae9a1a5c8285c3d542981bae914e26b6 (diff)
parent	a9381351dd6c52bf465233cae5f50da227834607 (diff)
Merge tag 'block-6.14-20250313' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - Concurrent pci error and hotplug handling fix (Keith)
      - Endpoint function fixes (Damien)

 - Fix for a regression introduced in this cycle with error checking for
   batched request completions (Shin'ichiro)

* tag 'block-6.14-20250313' of git://git.kernel.dk/linux:
  block: change blk_mq_add_to_batch() third argument type to bool
  nvme: move error logging from nvme_end_req() to __nvme_end_req()
  nvmet: pci-epf: Do not add an IRQ vector if not needed
  nvmet: pci-epf: Set NVMET_PCI_EPF_Q_LIVE when a queue is fully created
  nvme-pci: fix stuck reset on concurrent DPC and HP
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blk-mq.h	16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 71f4f0cc3dac..aba9c24486aa 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -852,12 +852,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
return rq->rq_flags & RQF_RESV;
}
-/*
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to the batch
+ * @iob: The batch to add the request to
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
* Batched completions only work when there is no I/O error and no special
* ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
*/
static inline bool blk_mq_add_to_batch(struct request *req,
- struct io_comp_batch *iob, int ioerror,
+ struct io_comp_batch *iob, bool is_error,
void (*complete)(struct io_comp_batch *))
{
/*
@@ -865,7 +873,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
* 1) No batch container
* 2) Has scheduler data attached
* 3) Not a passthrough request and end_io set
- * 4) Not a passthrough request and an ioerror
+ * 4) Not a passthrough request and failed with an error
*/
if (!iob)
return false;
@@ -874,7 +882,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
if (!blk_rq_is_passthrough(req)) {
if (req->end_io)
return false;
- if (ioerror < 0)
+ if (is_error)
return false;
}