// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};
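
/*
 * Prepare a shutdown request: only the 'how' argument, carried in
 * sqe->len, is used; any other SQE field must be zero.
 */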
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}
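
/*
 * Always execute shutdown from the blocking context: a nonblocking issue
 * attempt is answered with -EAGAIN so the core punts the request to io-wq.
 */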
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
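
/*
 * Decide whether a short transfer should be retried internally: only when
 * MSG_WAITALL was requested, and only on socket types with stream-like
 * semantics where continuing a partial transfer makes sense.
 */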
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
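
/*
 * Recycle the async msghdr into the per-ring cache instead of freeing it.
 * Only safe when the ring lock is held, i.e. not IO_URING_F_UNLOCKED.
 */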
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || (issue_flags & IO_URING_F_UNLOCKED))
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
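
/*
 * Allocate async msghdr state: grab a cached entry when the ring lock is
 * held, otherwise fall back to a plain io_alloc_async_data() allocation.
 */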
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
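
/*
 * Stash the caller's on-stack msghdr into persistent async data so the
 * request can be safely retried later. Returns -EAGAIN so the core punts
 * the request, or -ENOMEM if no async state could be allocated.
 */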
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;

		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

#ifdef CONFIG_COMPAT
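/*
 * Copy a compat msghdr from userspace and import its iovec. With
 * REQ_F_BUFFER_SELECT only zero or one iovec may be supplied, since the
 * actual buffer is chosen at issue time.
 */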
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		return 0;
	}

	iomsg->free_iov = iomsg->fast_iov;
	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
			     UIO_FASTIOV, &iomsg->free_iov,
			     &iomsg->msg.msg_iter, true);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}
#endif
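
/*
 * Native counterpart of io_compat_msg_copy_hdr(): copy the msghdr and
 * import its iovec, applying the same buffer-select restrictions.
 */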
static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
					   sizeof(*msg->msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		return 0;
	}

	iomsg->free_iov = iomsg->fast_iov;
	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
			     &iomsg->free_iov, &iomsg->msg.msg_iter, false);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}
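
/*
 * Import the complete sendmsg header, routing through the compat copy on
 * a compat ring, and stash msg_control since the send path overwrites it.
 */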
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
		if (unlikely(ret))
			return ret;

		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
		sr->msg_control = iomsg->msg.msg_control_user;
		return ret;
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);

	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}
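
/*
 * For the non-msghdr send variants that carry a destination address, copy
 * the sockaddr into async data at prep time so an async retry does not
 * need to touch user memory again.
 */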
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}