| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-28 16:30:12 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-28 16:30:12 -0700 |
| commit | c3018a2c6adae9b32f7b9259f5b38257ba9a758e | (patch) |
| tree | 3a91bbbc8ef5d0f81a60d9322d1fcefd4720a1c3 | /io_uring |
| parent | e5cf61fa6e2fb9ae6339eaa892612488c966baaf | (diff) |
| parent | d9f595b9a65e9c9eb03e21f3db98fde158d128db | (diff) |
| download | linux-c3018a2c6adae9b32f7b9259f5b38257ba9a758e.tar.gz linux-c3018a2c6adae9b32f7b9259f5b38257ba9a758e.tar.bz2 linux-c3018a2c6adae9b32f7b9259f5b38257ba9a758e.zip | |
Merge tag 'for-6.17/io_uring-20250728' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
- Optimization to avoid reference counts on non-cloned registered
buffers. This is how these buffers were handled prior to having
cloning support, and we can still use that approach as long as the
buffers haven't been cloned to another ring (a cloning sketch follows
the commit list below).
- Cleanup and improvement for uring_cmd, where btrfs was the only user
storing allocated data for the lifetime of the uring_cmd. Clean that
up so the need to do so goes away entirely.
- Avoid unnecessary memory copies in uring_cmd handling. This is
particularly important as a lot of uring_cmd users require 128b SQEs.
- A few updates for recv multishot: it's now possible to set a
fairness limit on how much is transferred in each retry loop.
Additionally, recv multishot now supports an overall byte cap; once
it is reached, the multishot recv terminates. The latter is useful
for buffer management and for juggling many recv streams at the same
time (a usage sketch follows the commit list below).
- Add support for returning TX timestamps via a new socket command.
This feature can work in either singleshot or multishot mode, where
the latter posts a completion whenever new timestamps are available.
This is an alternative to using the existing error queue (a usage
sketch follows the commit list below).
- Add support for an io_uring "mock" file, the start of being able to
do fully targeted testing of io_uring request handling. The idea is a
file type that can be whatever the tester would like, behaving
exactly as needed to exercise the code paths of interest (a creation
sketch follows the commit list below).
- Improve zcrx by using sgtables to de-duplicate and improve DMA
address handling.
- Prep work for supporting larger pages for zcrx.
- Various little improvements and fixes.
* tag 'for-6.17/io_uring-20250728' of git://git.kernel.dk/linux: (42 commits)
io_uring/zcrx: fix leaking pages on sg init fail
io_uring/zcrx: don't leak pages on account failure
io_uring/zcrx: fix null ifq on area destruction
io_uring: fix breakage in EXPERT menu
io_uring/cmd: remove struct io_uring_cmd_data
btrfs/ioctl: store btrfs_uring_encoded_data in io_btrfs_cmd
io_uring/cmd: introduce IORING_URING_CMD_REISSUE flag
io_uring/zcrx: account area memory
io_uring: export io_[un]account_mem
io_uring/net: Support multishot receive len cap
io_uring: deduplicate wakeup handling
io_uring/net: cast min_not_zero() type
io_uring/poll: cleanup apoll freeing
io_uring/net: allow multishot receive per-invocation cap
io_uring/net: move io_sr_msg->retry_flags to io_sr_msg->flags
io_uring/net: use passed in 'len' in io_recv_buf_select()
io_uring/zcrx: prepare fallback for larger pages
io_uring/zcrx: assert area type in io_zcrx_iov_page
io_uring/zcrx: allocate sgtable for umem areas
io_uring/zcrx: introduce io_populate_area_dma
...
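To make the registered-buffer bullet above concrete, here is a minimal userspace sketch (not part of the series) that registers a buffer with one ring and clones the registration into a second ring. It assumes a liburing recent enough to provide io_uring_clone_buffers(); per the note above, a registration that is never cloned keeps using the cheaper, reference-count-free path.

```c
/* Minimal sketch, not from the series: register a buffer, then clone the
 * registration into a second ring. Assumes io_uring_clone_buffers() is
 * available in the installed liburing. */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring src, dst;
	struct iovec iov;
	int ret;

	if (io_uring_queue_init(8, &src, 0) || io_uring_queue_init(8, &dst, 0))
		return 1;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return 1;

	/* Register with the source ring; as long as the registration stays
	 * un-cloned, the kernel can use the non-refcounted fast path. */
	ret = io_uring_register_buffers(&src, &iov, 1);
	if (ret) {
		fprintf(stderr, "register: %d\n", ret);
		return 1;
	}

	/* Cloning shares the registration with dst, which is the case that
	 * still needs reference counting on the underlying buffer. */
	ret = io_uring_clone_buffers(&dst, &src);
	if (ret)
		fprintf(stderr, "clone: %d\n", ret);

	io_uring_queue_exit(&src);
	io_uring_queue_exit(&dst);
	return 0;
}
```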
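For the multishot receive changes, the sketch below (illustrative only; the helper name is made up) arms a multishot recv with both caps: the per-invocation limit goes in sqe->len, which previously had to be zero for multishot, and the overall byte cap goes in sqe->optlen as added by this series. It assumes liburing's io_uring_prep_recv_multishot() helper, uapi headers new enough to expose sqe->optlen, and a provided-buffer group that has already been registered.

```c
/* Hypothetical helper arming a capped multishot recv on an existing ring. */
#include <errno.h>
#include <liburing.h>

static int arm_capped_multishot_recv(struct io_uring *ring, int sockfd,
				     unsigned short bgid,
				     unsigned per_loop_cap, unsigned total_cap)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;

	/* len == 0 keeps the old behaviour (size comes from the selected
	 * buffer); a non-zero len now caps each invocation's transfer. */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, per_loop_cap, 0);

	/* Provided buffers are mandatory for multishot receive. */
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = bgid;

	/* New in this series: overall cap; the request terminates once this
	 * many bytes have been received in total. */
	sqe->optlen = total_cap;

	return io_uring_submit(ring);
}
```

As with other multishot requests, the final completion posted after the total cap is consumed lacks IORING_CQE_F_MORE, so the submitter knows no further CQEs will arrive unless the request is re-armed.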
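The TX timestamp command might be driven as sketched below: a ring created with IORING_SETUP_CQE32 (the command fails with -EINVAL otherwise, per the cmd_net.c hunk further down) issues SOCKET_URING_OP_TX_TIMESTAMP against a socket that already has SO_TIMESTAMPING enabled with SOF_TIMESTAMPING_OPT_ID. The constants referenced in the comments (IORING_TIMESTAMP_TYPE_SHIFT, IORING_CQE_F_TSTAMP_HW) are this series' uapi additions, so the exact decoding is an assumption until the headers land.

```c
/* Sketch only: fetch queued TX timestamps from a socket's error queue via
 * the new socket uring_cmd. Ring must be set up with IORING_SETUP_CQE32. */
#include <liburing.h>
#include <stdio.h>

static int fetch_tx_timestamps(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;

	/* Only cmd_op and the socket fd matter; level/optname/optval are
	 * unused by this command. */
	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_TX_TIMESTAMP, sockfd,
			       0, 0, NULL, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;

	if (cqe->flags & IORING_CQE_F_MORE) {
		/* cqe->res carries the timestamp key (ee_data); the timestamp
		 * type sits above IORING_TIMESTAMP_TYPE_SHIFT in cqe->flags,
		 * with IORING_CQE_F_TSTAMP_HW marking hardware stamps. The
		 * second half of the 32-byte CQE holds the io_timespec. */
		printf("key %u%s ts %llu.%09llu\n", (unsigned)cqe->res,
		       (cqe->flags & IORING_CQE_F_TSTAMP_HW) ? " (hw)" : "",
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1]);
	}
	io_uring_cqe_seen(ring, cqe);
	return 0;
}
```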
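Finally, a sketch of creating a mock file through the new manager device. The device node name follows the "io_uring_mock" miscdevice registered in mock_file.c, and the struct fields and command opcodes come from the series' new uapi header (its installed path is assumed here); creating a mock file requires CAP_SYS_ADMIN and taints the kernel with TAINT_TEST.

```c
/* Sketch only: ask the io_uring mock manager device to create a mock file
 * and return its fd. Header path and struct layout assumed from the series. */
#include <liburing.h>
#include <linux/io_uring/mock_file.h>	/* new uapi header added by this series */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int create_mock_file(struct io_uring *ring)
{
	struct io_uring_mock_create mc;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int devfd, ret;

	devfd = open("/dev/io_uring_mock", O_RDWR);
	if (devfd < 0)
		return -1;

	memset(&mc, 0, sizeof(mc));
	mc.file_size = 65536;		/* reads/writes are bounded by this size */
	mc.rw_delay_ns = 1000000;	/* complete I/O from a 1ms hrtimer */
	mc.flags = IORING_MOCK_CREATE_F_POLL;	/* use the pollable fops */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, devfd, &mc, sizeof(mc), 0);
	sqe->cmd_op = IORING_MOCK_MGR_CMD_CREATE;
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0) {
		close(devfd);
		return ret;
	}
	/* On success the driver copies the struct back with out_fd filled in
	 * and installs the new mock file at that descriptor. */
	ret = cqe->res ? cqe->res : mc.out_fd;
	io_uring_cqe_seen(ring, cqe);
	close(devfd);
	return ret;
}
```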
Diffstat (limited to 'io_uring')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | io_uring/Makefile | 1 |
| -rw-r--r-- | io_uring/cmd_net.c | 82 |
| -rw-r--r-- | io_uring/io_uring.c | 90 |
| -rw-r--r-- | io_uring/io_uring.h | 28 |
| -rw-r--r-- | io_uring/mock_file.c | 363 |
| -rw-r--r-- | io_uring/net.c | 79 |
| -rw-r--r-- | io_uring/nop.c | 8 |
| -rw-r--r-- | io_uring/opdef.c | 1 |
| -rw-r--r-- | io_uring/opdef.h | 1 |
| -rw-r--r-- | io_uring/poll.c | 44 |
| -rw-r--r-- | io_uring/poll.h | 1 |
| -rw-r--r-- | io_uring/rsrc.c | 10 |
| -rw-r--r-- | io_uring/rsrc.h | 2 |
| -rw-r--r-- | io_uring/rw.c | 2 |
| -rw-r--r-- | io_uring/uring_cmd.c | 93 |
| -rw-r--r-- | io_uring/uring_cmd.h | 9 |
| -rw-r--r-- | io_uring/zcrx.c | 267 |
| -rw-r--r-- | io_uring/zcrx.h | 2 |
18 files changed, 867 insertions, 216 deletions
diff --git a/io_uring/Makefile b/io_uring/Makefile index d97c6b51d584..b3f1bd492804 100644 --- a/io_uring/Makefile +++ b/io_uring/Makefile @@ -21,3 +21,4 @@ obj-$(CONFIG_EPOLL) += epoll.o obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o obj-$(CONFIG_NET) += net.o cmd_net.o obj-$(CONFIG_PROC_FS) += fdinfo.o +obj-$(CONFIG_IO_URING_MOCK_FILE) += mock_file.o diff --git a/io_uring/cmd_net.c b/io_uring/cmd_net.c index e99170c7d41a..3866fe6ff541 100644 --- a/io_uring/cmd_net.c +++ b/io_uring/cmd_net.c @@ -1,5 +1,6 @@ #include <asm/ioctls.h> #include <linux/io_uring/net.h> +#include <linux/errqueue.h> #include <net/sock.h> #include "uring_cmd.h" @@ -51,6 +52,85 @@ static inline int io_uring_cmd_setsockopt(struct socket *sock, optlen); } +static bool io_process_timestamp_skb(struct io_uring_cmd *cmd, struct sock *sk, + struct sk_buff *skb, unsigned issue_flags) +{ + struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); + struct io_uring_cqe cqe[2]; + struct io_timespec *iots; + struct timespec64 ts; + u32 tstype, tskey; + int ret; + + BUILD_BUG_ON(sizeof(struct io_uring_cqe) != sizeof(struct io_timespec)); + + ret = skb_get_tx_timestamp(skb, sk, &ts); + if (ret < 0) + return false; + + tskey = serr->ee.ee_data; + tstype = serr->ee.ee_info; + + cqe->user_data = 0; + cqe->res = tskey; + cqe->flags = IORING_CQE_F_MORE; + cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT; + if (ret == SOF_TIMESTAMPING_TX_HARDWARE) + cqe->flags |= IORING_CQE_F_TSTAMP_HW; + + iots = (struct io_timespec *)&cqe[1]; + iots->tv_sec = ts.tv_sec; + iots->tv_nsec = ts.tv_nsec; + return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe); +} + +static int io_uring_cmd_timestamp(struct socket *sock, + struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct sock *sk = sock->sk; + struct sk_buff_head *q = &sk->sk_error_queue; + struct sk_buff *skb, *tmp; + struct sk_buff_head list; + int ret; + + if (!(issue_flags & IO_URING_F_CQE32)) + return -EINVAL; + ret = io_cmd_poll_multishot(cmd, issue_flags, EPOLLERR); + if (unlikely(ret)) + return ret; + + if (skb_queue_empty_lockless(q)) + return -EAGAIN; + __skb_queue_head_init(&list); + + scoped_guard(spinlock_irq, &q->lock) { + skb_queue_walk_safe(q, skb, tmp) { + /* don't support skbs with payload */ + if (!skb_has_tx_timestamp(skb, sk) || skb->len) + continue; + __skb_unlink(skb, q); + __skb_queue_tail(&list, skb); + } + } + + while (1) { + skb = skb_peek(&list); + if (!skb) + break; + if (!io_process_timestamp_skb(cmd, sk, skb, issue_flags)) + break; + __skb_dequeue(&list); + consume_skb(skb); + } + + if (!unlikely(skb_queue_empty(&list))) { + scoped_guard(spinlock_irqsave, &q->lock) + skb_queue_splice(q, &list); + } + return -EAGAIN; +} + int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) { struct socket *sock = cmd->file->private_data; @@ -76,6 +156,8 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) return io_uring_cmd_getsockopt(sock, cmd, issue_flags); case SOCKET_URING_OP_SETSOCKOPT: return io_uring_cmd_setsockopt(sock, cmd, issue_flags); + case SOCKET_URING_OP_TX_TIMESTAMP: + return io_uring_cmd_timestamp(sock, cmd, issue_flags); default: return -EOPNOTSUPP; } diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 5111ec040c53..4ef69dd58734 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -114,11 +114,11 @@ #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ - REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \ - REQ_F_ASYNC_DATA) + 
REQ_F_INFLIGHT | REQ_F_CREDS | REQ_F_ASYNC_DATA) #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \ - REQ_F_REISSUE | IO_REQ_CLEAN_FLAGS) + REQ_F_REISSUE | REQ_F_POLLED | \ + IO_REQ_CLEAN_FLAGS) #define IO_TCTX_REFS_CACHE_NR (1U << 10) @@ -147,7 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, bool cancel_all, bool is_sqpoll_thread); -static void io_queue_sqe(struct io_kiocb *req); +static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags); static void __io_req_caches_free(struct io_ring_ctx *ctx); static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray); @@ -392,11 +392,6 @@ static void io_clean_op(struct io_kiocb *req) if (def->cleanup) def->cleanup(req); } - if ((req->flags & REQ_F_POLLED) && req->apoll) { - kfree(req->apoll->double_poll); - kfree(req->apoll); - req->apoll = NULL; - } if (req->flags & REQ_F_INFLIGHT) atomic_dec(&req->tctx->inflight_tracked); if (req->flags & REQ_F_CREDS) @@ -793,6 +788,21 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow) return true; } +static bool io_fill_cqe_aux32(struct io_ring_ctx *ctx, + struct io_uring_cqe src_cqe[2]) +{ + struct io_uring_cqe *cqe; + + if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32))) + return false; + if (unlikely(!io_get_cqe(ctx, &cqe))) + return false; + + memcpy(cqe, src_cqe, 2 * sizeof(*cqe)); + trace_io_uring_complete(ctx, NULL, cqe); + return true; +} + static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { @@ -904,6 +914,31 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) return posted; } +/* + * A helper for multishot requests posting additional CQEs. + * Should only be used from a task_work including IO_URING_F_MULTISHOT. + */ +bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2]) +{ + struct io_ring_ctx *ctx = req->ctx; + bool posted; + + lockdep_assert(!io_wq_current_is_worker()); + lockdep_assert_held(&ctx->uring_lock); + + cqe[0].user_data = req->cqe.user_data; + if (!ctx->lockless_cq) { + spin_lock(&ctx->completion_lock); + posted = io_fill_cqe_aux32(ctx, cqe); + spin_unlock(&ctx->completion_lock); + } else { + posted = io_fill_cqe_aux32(ctx, cqe); + } + + ctx->submit_state.cq_flush = true; + return posted; +} + static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; @@ -1377,7 +1412,7 @@ void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw) else if (req->flags & REQ_F_FORCE_ASYNC) io_queue_iowq(req); else - io_queue_sqe(req); + io_queue_sqe(req, 0); } void io_req_task_queue_fail(struct io_kiocb *req, int ret) @@ -1938,14 +1973,34 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd) return file; } -static void io_queue_async(struct io_kiocb *req, int ret) +static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags) +{ + const struct io_cold_def *def = &io_cold_defs[req->opcode]; + + if (req->flags & REQ_F_SQE_COPIED) + return 0; + req->flags |= REQ_F_SQE_COPIED; + if (!def->sqe_copy) + return 0; + if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE))) + return -EFAULT; + def->sqe_copy(req); + return 0; +} + +static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret) __must_hold(&req->ctx->uring_lock) { if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { +fail: io_req_defer_failed(req, ret); return; } + ret = io_req_sqe_copy(req, issue_flags); + if (unlikely(ret)) + goto fail; + switch (io_arm_poll_handler(req, 0)) { case 
IO_APOLL_READY: io_kbuf_recycle(req, 0); @@ -1960,19 +2015,21 @@ static void io_queue_async(struct io_kiocb *req, int ret) } } -static inline void io_queue_sqe(struct io_kiocb *req) +static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags) __must_hold(&req->ctx->uring_lock) { + unsigned int issue_flags = IO_URING_F_NONBLOCK | + IO_URING_F_COMPLETE_DEFER | extra_flags; int ret; - ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); + ret = io_issue_sqe(req, issue_flags); /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ if (unlikely(ret)) - io_queue_async(req, ret); + io_queue_async(req, issue_flags, ret); } static void io_queue_sqe_fallback(struct io_kiocb *req) @@ -1987,6 +2044,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) req->flags |= REQ_F_LINK; io_req_defer_failed(req, req->cqe.res); } else { + /* can't fail with IO_URING_F_INLINE */ + io_req_sqe_copy(req, IO_URING_F_INLINE); if (unlikely(req->ctx->drain_active)) io_drain_req(req); else @@ -2198,6 +2257,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, */ if (unlikely(link->head)) { trace_io_uring_link(req, link->last); + io_req_sqe_copy(req, IO_URING_F_INLINE); link->last->link = req; link->last = req; @@ -2221,7 +2281,7 @@ fallback: return 0; } - io_queue_sqe(req); + io_queue_sqe(req, IO_URING_F_INLINE); return 0; } diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 66c1ca73f55e..abc6de227f74 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -81,6 +81,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res); bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags); +bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]); void __io_commit_cqring_flush(struct io_ring_ctx *ctx); void io_req_track_inflight(struct io_kiocb *req); @@ -293,11 +294,22 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx) smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); } +static inline void __io_wq_wake(struct wait_queue_head *wq) +{ + /* + * + * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter + * set in the mask so that if we recurse back into our own poll + * waitqueue handlers, we know we have a dependency between eventfd or + * epoll and should terminate multishot poll at that point. + */ + if (wq_has_sleeper(wq)) + __wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); +} + static inline void io_poll_wq_wake(struct io_ring_ctx *ctx) { - if (wq_has_sleeper(&ctx->poll_wq)) - __wake_up(&ctx->poll_wq, TASK_NORMAL, 0, - poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); + __io_wq_wake(&ctx->poll_wq); } static inline void io_cqring_wake(struct io_ring_ctx *ctx) @@ -306,15 +318,9 @@ static inline void io_cqring_wake(struct io_ring_ctx *ctx) * Trigger waitqueue handler on all waiters on our waitqueue. This * won't necessarily wake up all the tasks, io_should_wake() will make * that decision. - * - * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter - * set in the mask so that if we recurse back into our own poll - * waitqueue handlers, we know we have a dependency between eventfd or - * epoll and should terminate multishot poll at that point. 
*/ - if (wq_has_sleeper(&ctx->cq_wait)) - __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, - poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); + + __io_wq_wake(&ctx->cq_wait); } static inline bool io_sqring_full(struct io_ring_ctx *ctx) diff --git a/io_uring/mock_file.c b/io_uring/mock_file.c new file mode 100644 index 000000000000..45d3735b2708 --- /dev/null +++ b/io_uring/mock_file.c @@ -0,0 +1,363 @@ +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/anon_inodes.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> +#include <linux/poll.h> + +#include <linux/io_uring/cmd.h> +#include <linux/io_uring_types.h> +#include <uapi/linux/io_uring/mock_file.h> + +struct io_mock_iocb { + struct kiocb *iocb; + struct hrtimer timer; + int res; +}; + +struct io_mock_file { + size_t size; + u64 rw_delay_ns; + bool pollable; + struct wait_queue_head poll_wq; +}; + +#define IO_VALID_COPY_CMD_FLAGS IORING_MOCK_COPY_FROM + +static int io_copy_regbuf(struct iov_iter *reg_iter, void __user *ubuf) +{ + size_t ret, copied = 0; + size_t buflen = PAGE_SIZE; + void *tmp_buf; + + tmp_buf = kzalloc(buflen, GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + + while (iov_iter_count(reg_iter)) { + size_t len = min(iov_iter_count(reg_iter), buflen); + + if (iov_iter_rw(reg_iter) == ITER_SOURCE) { + ret = copy_from_iter(tmp_buf, len, reg_iter); + if (ret <= 0) + break; + if (copy_to_user(ubuf, tmp_buf, ret)) + break; + } else { + if (copy_from_user(tmp_buf, ubuf, len)) + break; + ret = copy_to_iter(tmp_buf, len, reg_iter); + if (ret <= 0) + break; + } + ubuf += ret; + copied += ret; + } + + kfree(tmp_buf); + return copied; +} + +static int io_cmd_copy_regbuf(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + const struct io_uring_sqe *sqe = cmd->sqe; + const struct iovec __user *iovec; + unsigned flags, iovec_len; + struct iov_iter iter; + void __user *ubuf; + int dir, ret; + + ubuf = u64_to_user_ptr(READ_ONCE(sqe->addr3)); + iovec = u64_to_user_ptr(READ_ONCE(sqe->addr)); + iovec_len = READ_ONCE(sqe->len); + flags = READ_ONCE(sqe->file_index); + + if (unlikely(sqe->ioprio || sqe->__pad1)) + return -EINVAL; + if (flags & ~IO_VALID_COPY_CMD_FLAGS) + return -EINVAL; + + dir = (flags & IORING_MOCK_COPY_FROM) ? ITER_SOURCE : ITER_DEST; + ret = io_uring_cmd_import_fixed_vec(cmd, iovec, iovec_len, dir, &iter, + issue_flags); + if (ret) + return ret; + ret = io_copy_regbuf(&iter, ubuf); + return ret ? 
ret : -EFAULT; +} + +static int io_mock_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + switch (cmd->cmd_op) { + case IORING_MOCK_CMD_COPY_REGBUF: + return io_cmd_copy_regbuf(cmd, issue_flags); + } + return -ENOTSUPP; +} + +static enum hrtimer_restart io_mock_rw_timer_expired(struct hrtimer *timer) +{ + struct io_mock_iocb *mio = container_of(timer, struct io_mock_iocb, timer); + struct kiocb *iocb = mio->iocb; + + WRITE_ONCE(iocb->private, NULL); + iocb->ki_complete(iocb, mio->res); + kfree(mio); + return HRTIMER_NORESTART; +} + +static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len) +{ + struct io_mock_file *mf = iocb->ki_filp->private_data; + struct io_mock_iocb *mio; + + mio = kzalloc(sizeof(*mio), GFP_KERNEL); + if (!mio) + return -ENOMEM; + + mio->iocb = iocb; + mio->res = len; + hrtimer_setup(&mio->timer, io_mock_rw_timer_expired, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_start(&mio->timer, ns_to_ktime(mf->rw_delay_ns), + HRTIMER_MODE_REL); + return -EIOCBQUEUED; +} + +static ssize_t io_mock_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + struct io_mock_file *mf = iocb->ki_filp->private_data; + size_t len = iov_iter_count(to); + size_t nr_zeroed; + + if (iocb->ki_pos + len > mf->size) + return -EINVAL; + nr_zeroed = iov_iter_zero(len, to); + if (!mf->rw_delay_ns || nr_zeroed != len) + return nr_zeroed; + + return io_mock_delay_rw(iocb, len); +} + +static ssize_t io_mock_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct io_mock_file *mf = iocb->ki_filp->private_data; + size_t len = iov_iter_count(from); + + if (iocb->ki_pos + len > mf->size) + return -EINVAL; + if (!mf->rw_delay_ns) { + iov_iter_advance(from, len); + return len; + } + + return io_mock_delay_rw(iocb, len); +} + +static loff_t io_mock_llseek(struct file *file, loff_t offset, int whence) +{ + struct io_mock_file *mf = file->private_data; + + return fixed_size_llseek(file, offset, whence, mf->size); +} + +static __poll_t io_mock_poll(struct file *file, struct poll_table_struct *pt) +{ + struct io_mock_file *mf = file->private_data; + __poll_t mask = 0; + + poll_wait(file, &mf->poll_wq, pt); + + mask |= EPOLLOUT | EPOLLWRNORM; + mask |= EPOLLIN | EPOLLRDNORM; + return mask; +} + +static int io_mock_release(struct inode *inode, struct file *file) +{ + struct io_mock_file *mf = file->private_data; + + kfree(mf); + return 0; +} + +static const struct file_operations io_mock_fops = { + .owner = THIS_MODULE, + .release = io_mock_release, + .uring_cmd = io_mock_cmd, + .read_iter = io_mock_read_iter, + .write_iter = io_mock_write_iter, + .llseek = io_mock_llseek, +}; + +static const struct file_operations io_mock_poll_fops = { + .owner = THIS_MODULE, + .release = io_mock_release, + .uring_cmd = io_mock_cmd, + .read_iter = io_mock_read_iter, + .write_iter = io_mock_write_iter, + .llseek = io_mock_llseek, + .poll = io_mock_poll, +}; + +#define IO_VALID_CREATE_FLAGS (IORING_MOCK_CREATE_F_SUPPORT_NOWAIT | \ + IORING_MOCK_CREATE_F_POLL) + +static int io_create_mock_file(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + const struct file_operations *fops = &io_mock_fops; + const struct io_uring_sqe *sqe = cmd->sqe; + struct io_uring_mock_create mc, __user *uarg; + struct io_mock_file *mf = NULL; + struct file *file = NULL; + size_t uarg_size; + int fd = -1, ret; + + /* + * It's a testing only driver that allows exercising edge cases + * that wouldn't be possible to hit otherwise. 
+ */ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + + uarg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + uarg_size = READ_ONCE(sqe->len); + + if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index) + return -EINVAL; + if (uarg_size != sizeof(mc)) + return -EINVAL; + + memset(&mc, 0, sizeof(mc)); + if (copy_from_user(&mc, uarg, uarg_size)) + return -EFAULT; + if (!mem_is_zero(mc.__resv, sizeof(mc.__resv))) + return -EINVAL; + if (mc.flags & ~IO_VALID_CREATE_FLAGS) + return -EINVAL; + if (mc.file_size > SZ_1G) + return -EINVAL; + if (mc.rw_delay_ns > NSEC_PER_SEC) + return -EINVAL; + + mf = kzalloc(sizeof(*mf), GFP_KERNEL_ACCOUNT); + if (!mf) + return -ENOMEM; + + ret = fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (fd < 0) + goto fail; + + init_waitqueue_head(&mf->poll_wq); + mf->size = mc.file_size; + mf->rw_delay_ns = mc.rw_delay_ns; + if (mc.flags & IORING_MOCK_CREATE_F_POLL) { + fops = &io_mock_poll_fops; + mf->pollable = true; + } + + file = anon_inode_create_getfile("[io_uring_mock]", fops, + mf, O_RDWR | O_CLOEXEC, NULL); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto fail; + } + + file->f_mode |= FMODE_READ | FMODE_CAN_READ | + FMODE_WRITE | FMODE_CAN_WRITE | + FMODE_LSEEK; + if (mc.flags & IORING_MOCK_CREATE_F_SUPPORT_NOWAIT) + file->f_mode |= FMODE_NOWAIT; + + mc.out_fd = fd; + if (copy_to_user(uarg, &mc, uarg_size)) { + fput(file); + ret = -EFAULT; + goto fail; + } + + fd_install(fd, file); + return 0; +fail: + if (fd >= 0) + put_unused_fd(fd); + kfree(mf); + return ret; +} + +static int io_probe_mock(struct io_uring_cmd *cmd) +{ + const struct io_uring_sqe *sqe = cmd->sqe; + struct io_uring_mock_probe mp, __user *uarg; + size_t uarg_size; + + uarg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + uarg_size = READ_ONCE(sqe->len); + + if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index || + uarg_size != sizeof(mp)) + return -EINVAL; + + memset(&mp, 0, sizeof(mp)); + if (copy_from_user(&mp, uarg, uarg_size)) + return -EFAULT; + if (!mem_is_zero(&mp, sizeof(mp))) + return -EINVAL; + + mp.features = IORING_MOCK_FEAT_END; + + if (copy_to_user(uarg, &mp, uarg_size)) + return -EFAULT; + return 0; +} + +static int iou_mock_mgr_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + switch (cmd->cmd_op) { + case IORING_MOCK_MGR_CMD_PROBE: + return io_probe_mock(cmd); + case IORING_MOCK_MGR_CMD_CREATE: + return io_create_mock_file(cmd, issue_flags); + } + return -EOPNOTSUPP; +} + +static const struct file_operations iou_mock_dev_fops = { + .owner = THIS_MODULE, + .uring_cmd = iou_mock_mgr_cmd, +}; + +static struct miscdevice iou_mock_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "io_uring_mock", + .fops = &iou_mock_dev_fops, +}; + +static int __init io_mock_init(void) +{ + int ret; + + ret = misc_register(&iou_mock_miscdev); + if (ret < 0) { + pr_err("Could not initialize io_uring mock device\n"); + return ret; + } + return 0; +} + +static void __exit io_mock_exit(void) +{ + misc_deregister(&iou_mock_miscdev); +} + +module_init(io_mock_init) +module_exit(io_mock_exit) + +MODULE_AUTHOR("Pavel Begunkov <asml.silence@gmail.com>"); +MODULE_DESCRIPTION("io_uring mock file"); +MODULE_LICENSE("GPL"); diff --git a/io_uring/net.c b/io_uring/net.c index bec8c6ed0a93..35585bdc59f3 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -75,15 +75,29 @@ struct io_sr_msg { u16 flags; /* initialised and used only by !msg send variants */ u16 buf_group; - unsigned short retry_flags; + /* per-invocation mshot limit */ + unsigned 
mshot_len; + /* overall mshot byte limit */ + unsigned mshot_total_len; void __user *msg_control; /* used only for send zerocopy */ struct io_kiocb *notif; }; +/* + * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold + * anyway. Use the upper 8 bits for internal uses. + */ enum sr_retry_flags { - IO_SR_MSG_RETRY = 1, - IO_SR_MSG_PARTIAL_MAP = 2, + IORING_RECV_RETRY = (1U << 15), + IORING_RECV_PARTIAL_MAP = (1U << 14), + IORING_RECV_MSHOT_CAP = (1U << 13), + IORING_RECV_MSHOT_LIM = (1U << 12), + IORING_RECV_MSHOT_DONE = (1U << 11), + + IORING_RECV_RETRY_CLEAR = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP, + IORING_RECV_NO_RETRY = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP | + IORING_RECV_MSHOT_CAP | IORING_RECV_MSHOT_DONE, }; /* @@ -192,8 +206,8 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req, req->flags &= ~REQ_F_BL_EMPTY; sr->done_io = 0; - sr->retry_flags = 0; - sr->len = 0; /* get from the provided buffer */ + sr->flags &= ~IORING_RECV_RETRY_CLEAR; + sr->len = sr->mshot_len; } static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg, @@ -402,7 +416,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); sr->done_io = 0; - sr->retry_flags = 0; sr->len = READ_ONCE(sqe->len); sr->flags = READ_ONCE(sqe->ioprio); if (sr->flags & ~SENDMSG_FLAGS) @@ -756,9 +769,8 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); sr->done_io = 0; - sr->retry_flags = 0; - if (unlikely(sqe->file_index || sqe->addr2)) + if (unlikely(sqe->addr2)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); @@ -783,15 +795,25 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) sr->buf_group = req->buf_index; req->buf_list = NULL; } + sr->mshot_total_len = sr->mshot_len = 0; if (sr->flags & IORING_RECV_MULTISHOT) { if (!(req->flags & REQ_F_BUFFER_SELECT)) return -EINVAL; if (sr->msg_flags & MSG_WAITALL) return -EINVAL; - if (req->opcode == IORING_OP_RECV && sr->len) + if (req->opcode == IORING_OP_RECV) { + sr->mshot_len = sr->len; + sr->mshot_total_len = READ_ONCE(sqe->optlen); + if (sr->mshot_total_len) + sr->flags |= IORING_RECV_MSHOT_LIM; + } else if (sqe->optlen) { return -EINVAL; + } req->flags |= REQ_F_APOLL_MULTISHOT; + } else if (sqe->optlen) { + return -EINVAL; } + if (sr->flags & IORING_RECVSEND_BUNDLE) { if (req->opcode == IORING_OP_RECVMSG) return -EINVAL; @@ -823,13 +845,28 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, if (kmsg->msg.msg_inq > 0) cflags |= IORING_CQE_F_SOCK_NONEMPTY; + if (*ret > 0 && sr->flags & IORING_RECV_MSHOT_LIM) { + /* + * If sr->len hits zero, the limit has been reached. Mark + * mshot as finished, and flag MSHOT_DONE as well to prevent + * a potential bundle from being retried. 
+ */ + sr->mshot_total_len -= min_t(int, *ret, sr->mshot_total_len); + if (!sr->mshot_total_len) { + sr->flags |= IORING_RECV_MSHOT_DONE; + mshot_finished = true; + } + } + if (sr->flags & IORING_RECVSEND_BUNDLE) { size_t this_ret = *ret - sr->done_io; cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret), issue_flags); - if (sr->retry_flags & IO_SR_MSG_RETRY) + if (sr->flags & IORING_RECV_RETRY) cflags = req->cqe.flags | (cflags & CQE_F_MASK); + if (sr->mshot_len && *ret >= sr->mshot_len) + sr->flags |= IORING_RECV_MSHOT_CAP; /* bundle with no more immediate buffers, we're done */ if (req->flags & REQ_F_BL_EMPTY) goto finish; @@ -837,12 +874,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, * If more is available AND it was a full transfer, retry and * append to this one */ - if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 && + if (!(sr->flags & IORING_RECV_NO_RETRY) && + kmsg->msg.msg_inq > 1 && this_ret > 0 && !iov_iter_count(&kmsg->msg.msg_iter)) { req->cqe.flags = cflags & ~CQE_F_MASK; sr->len = kmsg->msg.msg_inq; sr->done_io += this_ret; - sr->retry_flags |= IO_SR_MSG_RETRY; + sr->flags |= IORING_RECV_RETRY; return false; } } else { @@ -859,10 +897,13 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, io_mshot_prep_retry(req, kmsg); /* Known not-empty or unknown state, retry */ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) { - if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY) + if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY && + !(sr->flags & IORING_RECV_MSHOT_CAP)) { return false; + } /* mshot retries exceeded, force a requeue */ sr->nr_multishot_loops = 0; + sr->flags &= ~IORING_RECV_MSHOT_CAP; |
