summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2025-11-14 09:57:30 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2025-11-14 09:57:30 -0800
commitac9f4f306d943c1c551280977ae5321167835088 (patch)
treecc86067ff70c06a005af3c745b9c6d3dcadf7f37
parentb86caedd0b2c6e86c3fbaf5a04e5f9161b5688fd (diff)
parent2d0e88f3fd1dcb37072d499c36162baf5b009d41 (diff)
downloadlinux-ac9f4f306d943c1c551280977ae5321167835088.tar.gz
linux-ac9f4f306d943c1c551280977ae5321167835088.tar.bz2
linux-ac9f4f306d943c1c551280977ae5321167835088.zip
Merge tag 'io_uring-6.18-20251113' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull io_uring fixes from Jens Axboe: - Use the actual segments in a request for bvec based buffers - Fix an odd case where the iovec might get leaked for a read/write request, if it was newly allocated, overflowed the alloc cache, and hit an early error - Minor tweak to the query API added in this release, returning the number of available entries * tag 'io_uring-6.18-20251113' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: io_uring/rsrc: don't use blk_rq_nr_phys_segments() as number of bvecs io_uring/query: return number of available queries io_uring/rw: ensure allocated iovec gets cleared for early failure
-rw-r--r--include/uapi/linux/io_uring/query.h3
-rw-r--r--io_uring/query.c2
-rw-r--r--io_uring/rsrc.c16
-rw-r--r--io_uring/rw.c3
4 files changed, 17 insertions, 7 deletions
diff --git a/include/uapi/linux/io_uring/query.h b/include/uapi/linux/io_uring/query.h
index 5d754322a27c..3539ccbfd064 100644
--- a/include/uapi/linux/io_uring/query.h
+++ b/include/uapi/linux/io_uring/query.h
@@ -36,6 +36,9 @@ struct io_uring_query_opcode {
__u64 enter_flags;
/* Bitmask of all supported IOSQE_* flags */
__u64 sqe_flags;
+ /* The number of available query opcodes */
+ __u32 nr_query_opcodes;
+ __u32 __pad;
};
#endif
diff --git a/io_uring/query.c b/io_uring/query.c
index 645301bd2c82..cf02893ba911 100644
--- a/io_uring/query.c
+++ b/io_uring/query.c
@@ -20,6 +20,8 @@ static ssize_t io_query_ops(void *data)
e->ring_setup_flags = IORING_SETUP_FLAGS;
e->enter_flags = IORING_ENTER_FLAGS;
e->sqe_flags = SQE_VALID_FLAGS;
+ e->nr_query_opcodes = __IO_URING_QUERY_MAX;
+ e->__pad = 0;
return sizeof(*e);
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 2602d76d5ff0..0010c4992490 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -943,8 +943,8 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
struct req_iterator rq_iter;
struct io_mapped_ubuf *imu;
struct io_rsrc_node *node;
- struct bio_vec bv, *bvec;
- u16 nr_bvecs;
+ struct bio_vec bv;
+ unsigned int nr_bvecs = 0;
int ret = 0;
io_ring_submit_lock(ctx, issue_flags);
@@ -965,8 +965,11 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
goto unlock;
}
- nr_bvecs = blk_rq_nr_phys_segments(rq);
- imu = io_alloc_imu(ctx, nr_bvecs);
+ /*
+ * blk_rq_nr_phys_segments() may overestimate the number of bvecs
+ * but avoids needing to iterate over the bvecs
+ */
+ imu = io_alloc_imu(ctx, blk_rq_nr_phys_segments(rq));
if (!imu) {
kfree(node);
ret = -ENOMEM;
@@ -977,16 +980,15 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
imu->len = blk_rq_bytes(rq);
imu->acct_pages = 0;
imu->folio_shift = PAGE_SHIFT;
- imu->nr_bvecs = nr_bvecs;
refcount_set(&imu->refs, 1);
imu->release = release;
imu->priv = rq;
imu->is_kbuf = true;
imu->dir = 1 << rq_data_dir(rq);
- bvec = imu->bvec;
rq_for_each_bvec(bv, rq, rq_iter)
- *bvec++ = bv;
+ imu->bvec[nr_bvecs++] = bv;
+ imu->nr_bvecs = nr_bvecs;
node->buf = imu;
data->nodes[index] = node;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 5b2241a5813c..abe68ba9c9dc 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -463,7 +463,10 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
void io_readv_writev_cleanup(struct io_kiocb *req)
{
+ struct io_async_rw *rw = req->async_data;
+
lockdep_assert_held(&req->ctx->uring_lock);
+ io_vec_free(&rw->vec);
io_rw_recycle(req, 0);
}