// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*/
/*
* This code implements the DMA subsystem. It provides a HW-neutral interface
* for other kernel code to use asynchronous memory copy capabilities,
* if present, and allows different HW DMA drivers to register as providing
* this capability.
*
 * Because we are accelerating what is already a relatively fast
* operation, the code goes to great lengths to avoid additional overhead,
* such as locking.
*
* LOCKING:
*
 * The subsystem keeps a global list of dma_device structs, which is protected
 * by a mutex, dma_list_mutex.
*
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel() (see the illustrative sketch below). Once a channel is
 * allocated, a reference is taken against its corresponding driver to prevent
 * removal.
*
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
*
* See Documentation/driver-api/dmaengine for more details
*/
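/*
 * Illustrative sketch of the two client models described above. This is not
 * part of this file and is not compiled; the calls shown are the public
 * dmaengine API, but variable names are hypothetical and error handling is
 * omitted.
 *
 * An opportunistic memcpy client:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		issue descriptors on chan;
 *	dmaengine_put();
 *
 * An exclusive-channel client:
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		use the channel, then dma_release_channel(chan);
 */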
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>
#include "dmaengine.h"
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
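/* number of clients currently holding a subsystem reference via dmaengine_get() */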
static long dmaengine_ref_count;
/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
static struct dentry *rootdir;
static void dmaengine_debug_register(struct dma_device *dma_dev)
{
dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
rootdir);
if (IS_ERR(dma_dev->dbg_dev_root))
dma_dev->dbg_dev_root = NULL;
}
static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
debugfs_remove_recursive(dma_dev->dbg_dev_root);
dma_dev->dbg_dev_root = NULL;
}
static void dmaengine_dbg_summary_show(struct seq_file *s,
struct dma_device *dma_dev)
{
struct dma_chan *chan;
list_for_each_entry(chan, &dma_dev->channels, device_node) {
if (chan->client_count) {
seq_printf(s, " %-13s| %s", dma_chan_name(chan),
chan->dbg_client_name ?: "in-use");
if (chan->router)
seq_printf(s, " (via router: %s)\n",
dev_name(chan->router->dev));
else
seq_puts(s, "\n");
}
}
}
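/*
 * Produces /sys/kernel/debug/dmaengine/summary. Illustrative layout only,
 * derived from the format strings below; the device, channel and client
 * names are hypothetical:
 *
 *	dma0 (1f270000.dma-controller): number of channels: 8
 *	 dma0chan0    | in-use
 *	 dma0chan1    | 2b300000.spi (via router: 1f280000.dma-router)
 */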
static int dmaengine_summary_show(struct seq_file *s, void *data)
{
struct dma_device *dma_dev = NULL;
mutex_lock(&dma_list_mutex);
list_for_each_entry(dma_dev, &dma_device_list, global_node) {
seq_printf(s, "dma%d (%s): number of channels: %u\n",
dma_dev->dev_id, dev_name(dma_dev->dev),
dma_dev->chancnt);
if (dma_dev->dbg_summary_show)
dma_dev->dbg_summary_show(s, dma_dev);
else
dmaengine_dbg_summary_show(s, dma_dev);
if (!list_is_last(&dma_dev->global_node, &dma_device_list))
seq_puts(s, "\n");
}
mutex_unlock(&dma_list_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
static void __init dmaengine_debugfs_init(void)
{
rootdir = debugfs_create_dir("dmaengine", NULL);
/* /sys/kernel/debug/dmaengine/summary */
debugfs_create_file("summary", 0444, rootdir, NULL,
&dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
return 0;
}
static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* CONFIG_DEBUG_FS */
/* --- sysfs implementation --- */
#define DMA_SLAVE_NAME "slave"
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev: device node
*
* Must be called under dma_list_mutex.
*/
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
struct dma_chan_dev *chan_dev;
chan_dev = container_of(dev, typeof(*chan_dev), device);
return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
int i;
int err;
mutex_lock(&dma_list_mutex);
chan = dev_to_dma_chan(dev);
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->memcpy_count;
err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
int i;
int err;
	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sysfs_emit(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);
	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);