summaryrefslogtreecommitdiff
path: root/io_uring/io_uring.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2025-04-24 10:28:14 -0600
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2025-05-02 07:50:47 +0200
commitb675b4c863f1dcfef44b6dc6f4c06212ae44288e (patch)
treed68509b694f13271c371ae72da6517a27b00d27a /io_uring/io_uring.c
parent20a3f73dd424e4ac5a2d5c78430721cae6352dda (diff)
downloadlinux-b675b4c863f1dcfef44b6dc6f4c06212ae44288e.tar.gz
linux-b675b4c863f1dcfef44b6dc6f4c06212ae44288e.tar.bz2
linux-b675b4c863f1dcfef44b6dc6f4c06212ae44288e.zip
io_uring: fix 'sync' handling of io_fallback_tw()
commit edd43f4d6f50ec3de55a0c9e9df6348d1da51965 upstream. A previous commit added a 'sync' parameter to io_fallback_tw(), which if true, means the caller wants to wait on the fallback thread handling it. But the logic is somewhat messed up, ensure that ctxs are swapped and flushed appropriately. Cc: stable@vger.kernel.org Fixes: dfbe5561ae93 ("io_uring: flush offloaded and delayed task_work on exit") Signed-off-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r-- io_uring/io_uring.c | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index efa7849b82c1..9883fd16cde4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1247,21 +1247,22 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
while (node) {
req = container_of(node, struct io_kiocb, io_task_work.node);
node = node->next;
- if (sync && last_ctx != req->ctx) {
+ if (last_ctx != req->ctx) {
if (last_ctx) {
- flush_delayed_work(&last_ctx->fallback_work);
+ if (sync)
+ flush_delayed_work(&last_ctx->fallback_work);
percpu_ref_put(&last_ctx->refs);
}
last_ctx = req->ctx;
percpu_ref_get(&last_ctx->refs);
}
- if (llist_add(&req->io_task_work.node,
- &req->ctx->fallback_llist))
- schedule_delayed_work(&req->ctx->fallback_work, 1);
+ if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+ schedule_delayed_work(&last_ctx->fallback_work, 1);
}
if (last_ctx) {
- flush_delayed_work(&last_ctx->fallback_work);
+ if (sync)
+ flush_delayed_work(&last_ctx->fallback_work);
percpu_ref_put(&last_ctx->refs);
}
}