diff options
| field | value | date |
|---|---|---|
| author | Felix Moessbauer <felix.moessbauer@siemens.com> | 2024-09-10 19:11:56 +0200 |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2024-10-17 15:21:14 +0200 |
| commit | fce514611fae9c3822d3502087d2e92bb7b63de7 (patch) | |
| tree | c6b47c54b271622db2b27fdd452c6ee394f7c154 /io_uring | |
| parent | d8920d253498e7a6b00de79e65fc36456fb7e0f3 (diff) | |
| download | linux-fce514611fae9c3822d3502087d2e92bb7b63de7.tar.gz linux-fce514611fae9c3822d3502087d2e92bb7b63de7.tar.bz2 linux-fce514611fae9c3822d3502087d2e92bb7b63de7.zip | |
io_uring/io-wq: do not allow pinning outside of cpuset
commit 0997aa5497c714edbb349ca366d28bd550ba3408 upstream.
The io worker threads are userland threads that just never exit to the
userland. By that, they are also assigned to a cgroup (the group of the
creating task).
When changing the affinity of the io_wq thread via syscall, we must only
allow cpumasks within the limits defined by the cpuset controller of the
cgroup (if enabled).
Fixes: da64d6db3bd3 ("io_uring: One wqe per wq")
Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
Link: https://lore.kernel.org/r/20240910171157.166423-2-felix.moessbauer@siemens.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'io_uring')
| -rw-r--r-- | io_uring/io-wq.c | 25 |
1 files changed, 19 insertions, 6 deletions
```diff
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 139cd49b2c27..c74bcc8d2f06 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
+#include <linux/cpuset.h>
 #include <linux/task_work.h>
 #include <linux/audit.h>
 #include <uapi/linux/io_uring.h>
@@ -1362,22 +1363,34 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
 
 int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
 {
+	cpumask_var_t allowed_mask;
+	int ret = 0;
 	int i;
 
 	if (!tctx || !tctx->io_wq)
 		return -EINVAL;
 
+	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
+
 	rcu_read_lock();
 	for_each_node(i) {
 		struct io_wqe *wqe = tctx->io_wq->wqes[i];
-
-		if (mask)
-			cpumask_copy(wqe->cpu_mask, mask);
-		else
-			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
+		if (mask) {
+			if (cpumask_subset(mask, allowed_mask))
+				cpumask_copy(wqe->cpu_mask, mask);
+			else
+				ret = -EINVAL;
+		} else {
+			if (!cpumask_and(wqe->cpu_mask, cpumask_of_node(i), allowed_mask))
+				cpumask_copy(wqe->cpu_mask, allowed_mask);
+		}
 	}
 	rcu_read_unlock();
-	return 0;
+
+	free_cpumask_var(allowed_mask);
+	return ret;
 }
 
 /*
```
