// SPDX-License-Identifier: GPL-2.0
/*
* PCI Peer 2 Peer DMA support.
*
* Copyright (c) 2016-2018, Logan Gunthorpe
* Copyright (c) 2016-2017, Microsemi Corporation
* Copyright (c) 2017, Christoph Hellwig
* Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt

#include <linux/ctype.h>
#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>
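
/*
 * Per-device P2PDMA state, published through pdev->p2pdma and read under
 * RCU:
 * @pool:		genalloc pool handing out chunks of this device's
 *			peer-to-peer memory
 * @p2pmem_published:	true once the owning driver has published the
 *			memory for use by peers
 * @map_types:		cache of the mapping type worked out for each
 *			provider/client device pair
 */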
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
struct xarray map_types;
};
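
/*
 * Wrapper around each dev_pagemap created for a chunk of P2P memory:
 * @provider:	the PCI device whose BAR backs the memory
 * @bus_offset:	offset to add to a physical address in the chunk to get
 *		the corresponding PCI bus address
 * @pgmap:	the embedded pagemap; to_p2p_pgmap() below recovers this
 *		wrapper from it with container_of()
 */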
struct pci_p2pdma_pagemap {
struct pci_dev *provider;
u64 bus_offset;
struct dev_pagemap pgmap;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}
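
/*
 * Usage sketch (illustrative only): given a ZONE_DEVICE page backed by
 * one of these pagemaps, the provider and the bus address can be
 * recovered through the embedded pgmap:
 *
 *	struct pci_p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(page->pgmap);
 *	u64 bus_addr = page_to_phys(page) + p2p_pgmap->bus_offset;
 */
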
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);
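
/*
 * The three attributes above expose the pool's state through sysfs;
 * upstream groups them in a "p2pmem" directory under the device. Assuming
 * that layout, userspace reads them as:
 *
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/size
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/available
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/published
 */
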
static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
size_t len = vma->vm_end - vma->vm_start;
struct pci_p2pdma *p2pdma;
struct percpu_ref *ref;
unsigned long vaddr;
void *kaddr;
	int ret;

	/* prevent private mappings from being established */
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
pci_info_ratelimited(pdev,
"%s: fail, attempted private mapping\n",
current->comm);
return -EINVAL;
	}

	if (vma->vm_pgoff) {
pci_info_ratelimited(pdev,
"%s: fail, attempted mapping with non-zero offset\n",
current->comm);
return -EINVAL;
	}

	rcu_read_lock();
p2pdma = rcu_dereference(pdev->p2pdma);
if (!p2pdma) {
ret = -ENODEV;
goto out;
}
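
	/*
	 * Allocate the whole VMA's length from the pool. The owner cookie
	 * returned with the chunk is the pagemap's percpu_ref, which
	 * tracks the lifetime of the underlying memory.
	 */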
kaddr = (void *)gen_pool_alloc_owner(p2pdma->pool, len, (void **)&ref);
if (!kaddr) {
ret = -ENOMEM;
goto out;
	}

	/*
	 * vm_insert_page() can sleep, so take a reference on the chunk's
	 * percpu_ref first so that rcu_read_unlock() can be done before
	 * inserting the pages.
	 */
if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
ret = -ENODEV;
goto out_free_mem;
}
	rcu_read_unlock();

	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr));
if (ret) {
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
return ret;
}
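		/*
		 * vm_insert_page() took its own reference to the page, so
		 * drop the allocation's reference; the final unmap will
		 * return the page to the pool. Each inserted page instead
		 * pins the percpu_ref to keep the pagemap alive.
		 */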
percpu_ref_get(ref);
put_page(virt_to_page(kaddr));
kaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
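
	/* drop the temporary reference taken before the RCU unlock */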
	percpu_ref_put(ref);

	return 0;
out_free_mem:
	gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
out:
	rcu_read_unlock();
	return ret;
}
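
/*
 * Userspace usage sketch for the handler above (illustrative; assumes it
 * is wired up to an "allocate" binary attribute in the device's p2pmem
 * sysfs group, as upstream does):
 *
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/p2pmem/allocate",
 *		      O_RDWR);
 *	void *p2pmem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED with a zero page offset, matching the
 * checks at the top of p2pmem_alloc_mmap().
 */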