| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-02-14 11:30:53 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-02-14 11:30:53 -0800 |
| commit | ea717324741471665110b4475a52c08a56026a9e | |
| tree | 42dacb310f49e126339993f1d905358e17a651d8 /io_uring | |
| parent | 04f41cbf03ec7221ab0b179e336f4c805ee55520 | |
| parent | d6211ebbdaa541af197b50b8dd8f22642ce0b87f | |
Merge tag 'io_uring-6.14-20250214' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
- Fixes for a potential data corruption issue with IORING_OP_URING_CMD,
  where not all of the SQE data was guaranteed to be stable. This will be
  revisited in the future; for now the SQE is unconditionally copied at
  prep time, so the opcode provides the same guarantees as all other
  opcodes (see the driver-side sketch after this list)
- Make the waitid opcode set up its async data in the prep handler, like
  other opcodes (no real fix here, just a consistency change; a userspace
  sketch of the opcode follows the shortlog below)
- Fix for waitid's abuse of io_tw_state
- When a buffer group's type is changed, allocate a new buffer group
  entry and discard the old one, rather than migrating the existing one
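To make the first item above concrete, here is a minimal, hypothetical driver-side sketch of a ->uring_cmd() handler that reads its command payload straight out of the SQE. The foo_* names and FOO_CMD_GET_LEN are invented for illustration and are not from this merge; io_uring_sqe_cmd() and struct io_uring_cmd are the existing helpers from <linux/io_uring/cmd.h>. The prep-time SQE copy is what keeps such a payload pointer valid if the command is completed asynchronously later.

```c
/* Hypothetical driver sketch, not part of this merge. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>

#define FOO_CMD_GET_LEN	0x01		/* invented command opcode */

struct foo_cmd {			/* invented SQE payload layout */
	__u64 user_addr;
	__u32 len;
	__u32 flags;
};

static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/* The payload lives in the SQE itself (big SQEs give 80 bytes of room). */
	const struct foo_cmd *fc = io_uring_sqe_cmd(ioucmd->sqe);

	if (ioucmd->cmd_op != FOO_CMD_GET_LEN)
		return -ENOTTY;

	/*
	 * If this command went async instead (return -EIOCBQUEUED now and
	 * call io_uring_cmd_done() later), 'fc' would still be safe to use
	 * from that context: with this merge the core copies the SQE at
	 * prep time, giving uring_cmd the same stability guarantee as the
	 * other opcodes.
	 */
	return fc->len;		/* synchronous completion; becomes the CQE result */
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.uring_cmd	= foo_uring_cmd,
};
```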
* tag 'io_uring-6.14-20250214' of git://git.kernel.dk/linux:
io_uring/uring_cmd: unconditionally copy SQEs at prep time
io_uring/waitid: setup async data in the prep handler
io_uring/uring_cmd: remove dead req_has_async_data() check
io_uring/uring_cmd: switch sqe to async_data on EAGAIN
io_uring/uring_cmd: don't assume io_uring_cmd_data layout
io_uring/kbuf: reallocate buf lists on upgrade
io_uring/waitid: don't abuse io_tw_state
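For context on the two waitid entries, below is a minimal userspace sketch of the IORING_OP_WAITID opcode whose prep and completion paths are touched here. It assumes liburing 2.5 or newer (for io_uring_prep_waitid()) and a kernel that supports the opcode (6.7+); it is a usage illustration, not code from this merge.

```c
#include <liburing.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	siginfo_t info = { 0 };
	pid_t pid;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	pid = fork();
	if (pid == 0)
		_exit(42);	/* child: exit with a recognizable status */

	/* Queue an async waitid on the child; the CQE arrives when it exits. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_waitid(sqe, P_PID, pid, &info, WEXITED, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("waitid res=%d, child exit status=%d\n",
		       cqe->res, info.si_status);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
```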
Diffstat (limited to 'io_uring')

| file | lines changed |
|---|---|
| io_uring/kbuf.c | 16 |
| io_uring/uring_cmd.c | 28 |
| io_uring/waitid.c | 18 |

3 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 04bf493eecae..8e72de7712ac 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -415,6 +415,13 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	}
 }
 
+static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+	scoped_guard(mutex, &ctx->mmap_lock)
+		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
+	io_put_bl(ctx, bl);
+}
+
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
@@ -636,12 +643,13 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		/* if mapped buffer ring OR classic exists, don't allow */
 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
 			return -EEXIST;
-	} else {
-		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
-		if (!bl)
-			return -ENOMEM;
+		io_destroy_bl(ctx, bl);
 	}
 
+	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+	if (!bl)
+		return -ENOMEM;
+
 	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
 	ring_size = flex_array_size(br, bufs, reg.ring_entries);
 
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 1f6a82128b47..e6701b7aa147 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -54,9 +54,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 			continue;
 
 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
-			/* ->sqe isn't available if no async data */
-			if (!req_has_async_data(req))
-				cmd->sqe = NULL;
 			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
 					      IO_URING_F_COMPLETE_DEFER);
 			ret = true;
@@ -179,12 +176,13 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 		return -ENOMEM;
 	cache->op_data = NULL;
 
-	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
-		/* defer memcpy until we need it */
-		ioucmd->sqe = sqe;
-		return 0;
-	}
-
+	/*
+	 * Unconditionally cache the SQE for now - this is only needed for
+	 * requests that go async, but prep handlers must ensure that any
+	 * sqe data is stable beyond prep. Since uring_cmd is special in
+	 * that it doesn't read in per-op data, play it safe and ensure that
+	 * any SQE data is stable beyond prep. This can later get relaxed.
+	 */
 	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
 	ioucmd->sqe = cache->sqes;
 	return 0;
@@ -249,16 +247,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		struct io_uring_cmd_data *cache = req->async_data;
-
-		if (ioucmd->sqe != (void *) cache)
-			memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
-		return -EAGAIN;
-	} else if (ret == -EIOCBQUEUED) {
-		return -EIOCBQUEUED;
-	}
-
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+		return ret;
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_uring_cleanup(req, issue_flags);
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 853e97a7b0ec..15a7daf3ff4f 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -118,7 +118,6 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
 static void io_waitid_complete(struct io_kiocb *req, int ret)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
-	struct io_tw_state ts = {};
 
 	/* anyone completing better be holding a reference */
 	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
@@ -131,7 +130,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
-	io_req_task_complete(req, &ts);
 }
 
 static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
@@ -153,6 +151,7 @@ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
 	list_del_init(&iwa->wo.child_wait.entry);
 	spin_unlock_irq(&iw->head->lock);
 	io_waitid_complete(req, -ECANCELED);
+	io_req_queue_tw_complete(req, -ECANCELED);
 	return true;
 }
 
@@ -258,6 +257,7 @@ static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
 	}
 
 	io_waitid_complete(req, ret);
+	io_req_task_complete(req, ts);
 }
 
 static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
@@ -285,10 +285,16 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
 int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+	struct io_waitid_async *iwa;
 
 	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
 		return -EINVAL;
 
+	iwa = io_uring_alloc_async_data(NULL, req);
+	if (!unlikely(iwa))
+		return -ENOMEM;
+	iwa->req = req;
+
 	iw->which = READ_ONCE(sqe->len);
 	iw->upid = READ_ONCE(sqe->fd);
 	iw->options = READ_ONCE(sqe->file_index);
@@ -299,16 +305,10 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+	struct io_waitid_async *iwa = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_waitid_async *iwa;
 	int ret;
 
-	iwa = io_uring_alloc_async_data(NULL, req);
-	if (!iwa)
-		return -ENOMEM;
-
-	iwa->req = req;
-
 	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
 					iw->options, NULL);
 	if (ret)
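One detail in the kbuf hunk above that may be unfamiliar: the new io_destroy_bl() uses scoped_guard() from <linux/cleanup.h>, which takes ctx->mmap_lock for exactly one statement and drops it automatically. Below is a standalone sketch of the same pattern, with invented demo_* names (not from this merge):

```c
#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);		/* invented lock */
static int demo_count;			/* invented state protected by demo_lock */

static void demo_bump(void)
{
	/*
	 * scoped_guard() acquires demo_lock for the statement (or block)
	 * that follows and releases it when that scope is left, so there is
	 * no explicit mutex_unlock() to forget - the same way io_destroy_bl()
	 * holds ctx->mmap_lock only across the xa_erase() check.
	 */
	scoped_guard(mutex, &demo_lock)
		demo_count++;
}
```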
