summary | refs | log | tree | commit | diff
path: root/io_uring/io_uring.c
diff options
context:
space:
mode:
author Jens Axboe <axboe@kernel.dk> 2024-05-29 09:38:38 -0600
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2025-03-22 12:50:45 -0700
commita0b21f2aca04d4a5dd9f4a0aaf04685970a6b59a (patch)
treec52e1cafb3f439a41a5305db04f0603470d82c29 /io_uring/io_uring.c
parent2905c4fe7e52e05c7ca252e0bd59f483a5c6cb76 (diff)
downloadlinux-a0b21f2aca04d4a5dd9f4a0aaf04685970a6b59a.tar.gz
linux-a0b21f2aca04d4a5dd9f4a0aaf04685970a6b59a.tar.bz2
linux-a0b21f2aca04d4a5dd9f4a0aaf04685970a6b59a.zip
io_uring: don't attempt to mmap larger than what the user asks for
Commit 06fe9b1df1086b42718d632aa57e8f7cd1a66a21 upstream. If IORING_FEAT_SINGLE_MMAP is ignored, as can happen if an application uses an ancient liburing or does setup manually, then 3 mmap's are required to map the ring into userspace. The kernel will still have collapsed the mappings, however userspace may ask for mapping them individually. If so, then we should not use the full number of ring pages, as it may exceed the partial mapping. Doing so will yield an -EFAULT from vm_insert_pages(), as we pass in more pages than what the application asked for. Cap the number of pages to match what the application asked for, for the particular mapping operation. Reported-by: Lucas Mülling <lmulling@proton.me> Link: https://github.com/axboe/liburing/issues/1157 Fixes: 3ab1db3c6039 ("io_uring: get rid of remap_pfn_range() for mapping rings/sqes") Signed-off-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b506064ca29d..53ec5bb245c8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3613,6 +3613,7 @@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
struct io_ring_ctx *ctx = file->private_data;
size_t sz = vma->vm_end - vma->vm_start;
long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned int npages;
unsigned long pfn;
void *ptr;
@@ -3623,8 +3624,8 @@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset & IORING_OFF_MMAP_MASK) {
case IORING_OFF_SQ_RING:
case IORING_OFF_CQ_RING:
- return io_uring_mmap_pages(ctx, vma, ctx->ring_pages,
- ctx->n_ring_pages);
+ npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
case IORING_OFF_SQES:
return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
ctx->n_sqe_pages);