/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
* extracted from shdma.c
*
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
DESC_IDLE,
DESC_PREPARED,
DESC_SUBMITTED,
DESC_COMPLETED, /* completed, have to call callback */
DESC_WAITING, /* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
*/
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits; bit N is set while slave ID N is in use */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
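/* Start the next queued descriptor, unless the channel is still busy */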
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
struct shdma_desc *sdesc;
	/* Do nothing if a transfer is already in progress */
if (ops->channel_busy(schan))
return;
	/* Find the first not-yet-transferred descriptor and start it */
list_for_each_entry(sdesc, &schan->ld_queue, node)
if (sdesc->mark == DESC_SUBMITTED) {
ops->start_xfer(schan, sdesc);
break;
}
}
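
/*
 * Submit a descriptor: assign a cookie, mark all of its chunks as submitted
 * and move them to ld_queue. If the queue was empty, the channel is first
 * resumed via runtime PM and the transfer is set up.
 */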
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct shdma_desc *chunk, *c, *desc =
container_of(tx, struct shdma_desc, async_tx);
struct shdma_chan *schan = to_shdma_chan(tx->chan);
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
bool power_up;
spin_lock_irq(&schan->chan_lock);
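	/* An empty queue means the channel is idle and needs to be powered up below */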
power_up = list_empty(&schan->ld_queue);
cookie = dma_cookie_assign(tx);
/* Mark all chunks of this descriptor as submitted, move to the queue */
list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
if (chunk != desc && (chunk->mark == DESC_IDLE ||
chunk->async_tx.cookie > 0 ||
chunk->async_tx.cookie == -EBUSY ||
&chunk->node == &schan->ld_free))
break;
chunk->mark = DESC_SUBMITTED;
if (chunk->chunks == 1) {
chunk->async_tx.callback = callback;
chunk->async_tx.callback_param = tx->callback_param;
} else {
/* Callback goes to the last chunk */
chunk->async_tx.callback = NULL;
}
chunk->cookie = cookie;
list_move_tail(&chunk->node, &schan->ld_queue);
dev_dbg(schan->dev, "submit #%d@%p on %d\n",
tx->cookie, &chunk->async_tx, schan->id);
}
if (power_up) {
int ret;
schan->pm_state = SHDMA_PM_BUSY;
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
if (ret < 0)
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
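
		/* Wait for the async resume requested above to complete */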
pm_runtime_barrier(schan->dev);
spin_lock_irq(&schan->chan_lock);
		/* Have we been reset while waiting? */
if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
struct shdma_dev *sdev =
to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it return int, and on error remove the chunks
			 * from the queue again
			 */
ops->setup_xfer(schan, schan->slave_id);
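
			/*
			 * If a start was requested while the lock was dropped,
			 * run the queue now
			 */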
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
schan->pm_state = SHDMA_PM_ESTABLISHED;
}
} else {
		/*
		 * Tell .device_issue_pending() not to run the queue;
		 * the interrupt handler will do it anyway
		 */
schan->pm_state = SHDMA_PM_PENDING;
}
spin_unlock_irq(&schan->chan_lock);
return cookie;
}
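
/* Pick the first idle descriptor off the channel's free list (ld_free) */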
/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
struct shdma_desc *sdesc;
list_for_each_entry(sdesc, &schan->ld_free, node)
if (sdesc->mark != DESC_PREPARED) {
BUG_ON(sdesc->mark != DESC_IDLE);
list_del(&sdesc->node);
return sdesc;
}
return NULL;
}
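
/*
 * Set up a slave channel: claim its slave ID (see shdma_slave_used above) and
 * let the driver configure the hardware for DMA to / from slave_addr.
 */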
static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;