author    Linus Torvalds <torvalds@linux-foundation.org>    2022-08-02 13:37:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-08-02 13:37:55 -0700
commit    42df1cbf6a4726934cc5dac12bf263aa73c49fa3 (patch)
tree      129e32104dccc660c3c8fca03b8bba418e572d09 /io_uring/rsrc.c
parent    98e247464088a11ce2328a214fdb87d4c06f8db6 (diff)
parent    14b146b688ad9593f5eee93d51a34d09a47e50b5 (diff)
Merge tag 'for-5.20/io_uring-zerocopy-send-2022-07-29' of git://git.kernel.dk/linux-block
Pull io_uring zerocopy support from Jens Axboe:
 "This adds efficient zerocopy send support to io_uring. Both IPv4 and
  IPv6 are supported, as well as both TCP and UDP.

  The core network changes to support this are in a stable branch from
  Jakub that both io_uring and net-next have pulled in, and the io_uring
  changes are layered on top of that.

  All of the work has been done by Pavel"

* tag 'for-5.20/io_uring-zerocopy-send-2022-07-29' of git://git.kernel.dk/linux-block: (34 commits)
  io_uring: notification completion optimisation
  io_uring: export req alloc from core
  io_uring/net: use unsigned for flags
  io_uring/net: make page accounting more consistent
  io_uring/net: checks errors of zc mem accounting
  io_uring/net: improve io_get_notif_slot types
  selftests/io_uring: test zerocopy send
  io_uring: enable managed frags with register buffers
  io_uring: add zc notification flush requests
  io_uring: rename IORING_OP_FILES_UPDATE
  io_uring: flush notifiers after sendzc
  io_uring: sendzc with fixed buffers
  io_uring: allow to pass addr into sendzc
  io_uring: account locked pages for non-fixed zc
  io_uring: wire send zc request type
  io_uring: add notification slot registration
  io_uring: add rsrc referencing for notifiers
  io_uring: complete notifiers in tw
  io_uring: cache struct io_notif
  io_uring: add zc notification infrastructure
  ...
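For context on how this feature is consumed from userspace, below is a minimal
sketch of one zerocopy send driven through liburing's io_uring_prep_send_zc()
helper. This is illustrative only: it assumes liburing >= 2.3 and a kernel
exposing IORING_OP_SEND_ZC (the form this interface eventually stabilized
into), it does not use the notification-slot registration added in this
series, and 'sockfd' is assumed to be an already-connected TCP or UDP socket.

/*
 * Illustrative sketch, not part of this commit: one zerocopy send via
 * liburing. Assumes liburing >= 2.3 and IORING_OP_SEND_ZC support.
 */
#include <liburing.h>
#include <stdio.h>

static int send_zc_once(int sockfd, const void *buf, size_t len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret, seen = 0;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		io_uring_queue_exit(&ring);
		return -1;
	}
	/* flags = 0 (MSG_* send flags), zc_flags = 0 (no fixed buffer) */
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(&ring);

	/*
	 * A zerocopy send typically posts two CQEs: the send result (with
	 * IORING_CQE_F_MORE set) and a later notification (IORING_CQE_F_NOTIF)
	 * indicating the kernel no longer references the buffer, so it is
	 * safe to reuse or free it.
	 */
	while (seen < 2) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		if (!(cqe->flags & IORING_CQE_F_NOTIF))
			printf("send result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
		seen++;
	}

	io_uring_queue_exit(&ring);
	return ret;
}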
Diffstat (limited to 'io_uring/rsrc.c')
-rw-r--r--  io_uring/rsrc.c  67
1 file changed, 57 insertions(+), 10 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 7f66b0e25674..59704b9ac537 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -15,12 +15,14 @@
#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
+#include "notif.h"
struct io_rsrc_update {
struct file *file;
u64 arg;
u32 nr_args;
u32 offset;
+ int type;
};
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
@@ -42,17 +44,13 @@ void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
}
}
-static inline void __io_unaccount_mem(struct user_struct *user,
- unsigned long nr_pages)
-{
- atomic_long_sub(nr_pages, &user->locked_vm);
-}
-
-static inline int __io_account_mem(struct user_struct *user,
- unsigned long nr_pages)
+int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
unsigned long page_limit, cur_pages, new_pages;
+ if (!nr_pages)
+ return 0;
+
/* Don't allow more pages than we can safely lock */
page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -657,7 +655,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
}
-int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req);
@@ -671,6 +669,7 @@ int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!up->nr_args)
return -EINVAL;
up->arg = READ_ONCE(sqe->addr);
+ up->type = READ_ONCE(sqe->ioprio);
return 0;
}
@@ -713,7 +712,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
return ret;
}
-int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req);
struct io_ring_ctx *ctx = req->ctx;
@@ -742,6 +741,54 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
+static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned len = up->nr_args;
+ unsigned idx_end, idx = up->offset;
+ int ret = 0;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ if (unlikely(check_add_overflow(idx, len, &idx_end))) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+ if (unlikely(idx_end > ctx->nr_notif_slots)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (; idx < idx_end; idx++) {
+ struct io_notif_slot *slot = &ctx->notif_slots[idx];
+
+ if (!slot->notif)
+ continue;
+ if (up->arg)
+ slot->tag = up->arg;
+ io_notif_slot_flush_submit(slot, issue_flags);
+ }
+out:
+ io_ring_submit_unlock(ctx, issue_flags);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_OK;
+}
+
+int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+
+ switch (up->type) {
+ case IORING_RSRC_UPDATE_FILES:
+ return io_files_update(req, issue_flags);
+ case IORING_RSRC_UPDATE_NOTIF:
+ return io_notif_update(req, issue_flags);
+ }
+ return -EINVAL;
+}
+
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
struct io_rsrc_node *node, void *rsrc)
{