author     Jens Axboe <axboe@kernel.dk>    2025-09-23 04:25:22 -0600
committer  Jens Axboe <axboe@kernel.dk>    2025-10-20 10:37:48 -0600
commit     7be20254a743be4f02414b9d56cc3fe5f84e6500 (patch)
tree       08aae95dac6af07ce8999f7177525651a507b169 /io_uring/io_uring.c
parent     211ddde0823f1442e4ad052a2f30f050145ccada (diff)
io_uring: unify task_work cancelation checks
Rather than do per-tw checking, which needs to dip into the task_struct to check flags, do it upfront before running task_work. This places a 'cancel' member in io_tw_token_t, which is assigned before running task_work for a given ctx.

This is both more efficient, doing the check once upfront rather than for every task_work item, and it means that io_should_terminate_tw() can be made private to io_uring.c rather than needing to be called by the various task_work callbacks.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
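The diff shown here is limited to io_uring/io_uring.c, so the companion change to the token type itself is not visible. Going by the description, and assuming io_tw_token_t is still a typedef of struct io_tw_state in include/linux/io_uring_types.h, the header side of the change presumably looks roughly like this sketch (not the verbatim hunk):

/* Sketch of the companion header change (not shown in this file-limited
 * diff): the tw state gains a 'cancel' flag that io_uring.c fills in
 * once per ctx before running task_work for it.
 */
struct io_tw_state {
	/* true if pending task_work for this ctx should be canceled */
	bool cancel;
};
typedef struct io_tw_state io_tw_token_t;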
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 820ef0527666..c397118da85e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -265,6 +265,20 @@ static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
complete(&ctx->ref_comp);
}
+/*
+ * Terminate the request if any of these conditions is true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ * with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ * our fallback task_work.
+ * 3) The ring has been closed and is going away.
+ */
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
+{
+ return (current->flags & (PF_EXITING | PF_KTHREAD)) || percpu_ref_is_dying(&ctx->refs);
+}
+
static __cold void io_fallback_req_func(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
@@ -275,8 +289,10 @@ static __cold void io_fallback_req_func(struct work_struct *work)
percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
- llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
+ llist_for_each_entry_safe(req, tmp, node, io_task_work.node) {
+ ts.cancel = io_should_terminate_tw(req->ctx);
req->io_task_work.func(req, ts);
+ }
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
@@ -1147,6 +1163,7 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
ctx = req->ctx;
mutex_lock(&ctx->uring_lock);
percpu_ref_get(&ctx->refs);
+ ts.cancel = io_should_terminate_tw(ctx);
}
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
@@ -1205,11 +1222,6 @@ struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
{
struct llist_node *node;
- if (unlikely(current->flags & PF_EXITING)) {
- io_fallback_tw(tctx, true);
- return NULL;
- }
-
node = llist_del_all(&tctx->task_list);
if (node) {
node = llist_reverse_order(node);
@@ -1399,6 +1411,7 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, io_tw_token_t tw,
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
+ tw.cancel = io_should_terminate_tw(ctx);
min_events -= ret;
ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events);
if (ctx->retry_llist.first)
@@ -1458,7 +1471,7 @@ void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
struct io_ring_ctx *ctx = req->ctx;
io_tw_lock(ctx, tw);
- if (unlikely(io_should_terminate_tw(ctx)))
+ if (unlikely(tw.cancel))
io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC)
io_queue_iowq(req);
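The last hunk shows the consumer side: a task_work callback reads the precomputed tw.cancel flag instead of calling io_should_terminate_tw(), which is now private to io_uring.c. The same substitution presumably applies to the other callbacks touched elsewhere in this series; a minimal sketch of the pattern, using a hypothetical callback name (io_example_task_func is illustrative, not part of the commit):

/* Hypothetical task_work callback illustrating the new pattern: the
 * cancel decision was made once before task_work was run for this ctx,
 * so the callback only consults tw.cancel.
 */
static void io_example_task_func(struct io_kiocb *req, io_tw_token_t tw)
{
	if (unlikely(tw.cancel)) {
		/* ring or task is going away, fail the request */
		io_req_defer_failed(req, -ECANCELED);
		return;
	}
	io_req_task_complete(req, tw);
}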