// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff_ref.h>

#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/netlink.h>
#include <net/netdev_rx_queue.h>
#include <net/tcp.h>
#include <net/rps.h>

#include <trace/events/page_pool.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "memmap.h"
#include "zcrx.h"
#include "rsrc.h"

#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
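
/* Fetch the zcrx interface queue that owns this page pool (stored as mp_priv). */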
static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}
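
/* Map a net_iov back to the io_zcrx_area that owns it. */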
static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct io_zcrx_area, nia);
}
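
/* Return the pinned user page backing a net_iov. */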
static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return area->mem.pages[net_iov_idx(niov)];
}
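
/* Unpin and free the user pages backing an area. */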
static void io_release_area_mem(struct io_zcrx_mem *mem)
{
	if (mem->pages) {
		unpin_user_pages(mem->pages, mem->nr_folios);
		kvfree(mem->pages);
	}
}
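
/*
 * Validate the user-supplied buffer and pin its pages for later DMA mapping.
 * Both the address and the length must be page aligned.
 */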
static int io_import_area(struct io_zcrx_ifq *ifq,
			  struct io_zcrx_mem *mem,
			  struct io_uring_zcrx_area_reg *area_reg)
{
	struct page **pages;
	int nr_pages;
	int ret;

	ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
	if (ret)
		return ret;
	if (!area_reg->addr)
		return -EFAULT;
	if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
		return -EINVAL;

	pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	mem->pages = pages;
	mem->nr_folios = nr_pages;
	mem->size = area_reg->len;
	return 0;
}
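
/* Unmap the first @nr_mapped niovs of an area and clear their DMA addresses. */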
static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
				 struct io_zcrx_area *area, int nr_mapped)
{
	int i;

	for (i = 0; i < nr_mapped; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		dma_addr_t dma;

		dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
		dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, IO_DMA_ATTR);
		net_mp_niov_set_dma_addr(niov, 0);
	}
}
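
/* Unmap a whole area; dma_lock serialises this against io_zcrx_map_area(). */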
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	guard(mutex)(&ifq->dma_lock);

	if (area->is_mapped)
		__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
	area->is_mapped = false;
}
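
/*
 * DMA-map each page of the area and record the address in its net_iov.
 * On partial failure, roll back the mappings made so far.
 */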
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int i;

	guard(mutex)(&ifq->dma_lock);
	if (area->is_mapped)
		return 0;

	for (i = 0; i < area->nia.num_niovs; i++) {
		struct net_iov *niov = &area->nia.niovs[i];
		dma_addr_t dma;

		dma = dma_map_page_attrs(ifq->dev, area->mem.pages[i], 0,
					 PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR);
		if (dma_mapping_error(ifq->dev, dma))
			break;
		if (net_mp_niov_set_dma_addr(niov, dma)) {
			dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
					     DMA_FROM_DEVICE, IO_DMA_ATTR);
			break;
		}
	}

	if (i != area->nia.num_niovs) {
		__io_zcrx_unmap_area(ifq, area, i);
		return -EINVAL;
	}

	area->is_mapped = true;
	return 0;
}
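
/* Sync a buffer for device access when the DMA device needs explicit syncs. */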
static void io_zcrx_sync_for_device(const struct page_pool *pool,
				    struct net_iov *niov)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr;

	if (!dma_dev_need_sync(pool->p.dev))
		return;

	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     PAGE_SIZE, pool->p.dma_dir);
#endif
}
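
/*
 * IO_RQ_MAX_ENTRIES caps the refill ring size; IO_SKBS_PER_CALL_LIMIT bounds
 * how many skbs a single receive call will process.
 */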
#define IO_RQ_MAX_ENTRIES 32768
#define IO_SKBS_PER_CALL_LIMIT 20
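
/* State threaded through a single zero-copy receive call. */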
struct io_zcrx_args {
	struct io_kiocb		*req;
	struct io_zcrx_ifq	*ifq;
	struct socket		*sock;
	unsigned		nr_skbs;
};

static const struct memory_provider_ops io_uring_pp_zc_ops;
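
/* Return the counter tracking userspace references to this niov. */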
static inline atomic_t *io_get_user_counter(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	return &area->user_refs[net_iov_idx(niov)];
}
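
/*
 * Drop one userspace reference on a niov; fail if userspace returned a
 * buffer it doesn't hold a reference to.
 */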
static bool io_zcrx_put_niov_uref(struct net_iov *niov)
{
	atomic_t *uref = io_get_user_counter(niov);

	if (unlikely(!atomic_read(uref)))
		return false;
	atomic_dec(uref);
	return true;
}