author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-11 07:14:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-11 07:14:01 -0700
commit    c7a19c795b4b0a3232c157ed29eea85077e95da6 (patch)
tree      d3916dcdea74b55453694c9f31b95b6d906e3202 /drivers/dma
parent    5fd41f2a10b38ab84b4c2436140ce490d34291fa (diff)
parent    a0bbe990c161ddf56444efe80d9d6e15fdc47aca (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dma updates from Vinod Koul:
 "Some notable changes are:

   - new driver for AMBA AXI NBPF by Guennadi

   - new driver for sun6i controller by Maxime

   - pl330 driver fixes from Lars

   - sh-dma updates and fixes from Laurent, Geert and Kuninori

   - Documentation updates from Geert

   - driver fixes and updates spread over dw, edma, freescale, mpc512x, etc."

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (72 commits)
  dmaengine: sun6i: depends on RESET_CONTROLLER
  dma: at_hdmac: fix invalid remaining bytes detection
  dmaengine: nbpfaxi: don't build this driver where it cannot be used
  dmaengine: nbpf_error_get_channel() can be static
  dma: pl08x: Use correct specifier for size_t values
  dmaengine: Remove the context argument to the prep_dma_cyclic operation
  dmaengine: nbpfaxi: convert to tasklet
  dmaengine: nbpfaxi: fix a theoretical race
  dmaengine: add a driver for AMBA AXI NBPF DMAC IP cores
  dmaengine: add device tree binding documentation for the nbpfaxi driver
  dmaengine: edma: Do not register second device when booted with DT
  dmaengine: edma: Do not change the error code returned from edma_alloc_slot
  dmaengine: rcar-dmac: Add device tree bindings documentation
  dmaengine: shdma: Allocate cyclic sg list dynamically
  dmaengine: shdma: Make channel filter ignore unrelated devices
  dmaengine: sh: Rework Kconfig and Makefile
  dmaengine: sun6i: Fix memory leaks
  dmaengine: sun6i: Free the interrupt before killing the tasklet
  dmaengine: sun6i: Remove switch statement from buswidth convertion routine
  dmaengine: of: kconfig: select DMA_ENGINE when DMA_OF is selected
  ...
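The most invasive cross-driver change in this pull is "dmaengine: Remove the context argument to the prep_dma_cyclic operation", which touches the pl08x, at_hdmac, bcm2835, jz4740, edma, ep93xx and fsl-edma hunks below. As a hedged sketch of the signature change (types come from include/linux/dmaengine.h; the "foo" driver prefix is a placeholder, not one of the merged drivers):

/*
 * Sketch of the device_prep_dma_cyclic hook before and after this
 * series; "foo" is a hypothetical driver prefix.
 */
#include <linux/dmaengine.h>

/* Before: every driver had to accept (and ignore) an unused context pointer. */
static struct dma_async_tx_descriptor *foo_prep_dma_cyclic_old(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);

/* After: the context argument is dropped from the hook. */
static struct dma_async_tx_descriptor *foo_prep_dma_cyclic_new(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);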
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig            |   17
-rw-r--r--  drivers/dma/Makefile           |    6
-rw-r--r--  drivers/dma/TODO               |    1
-rw-r--r--  drivers/dma/amba-pl08x.c       |    6
-rw-r--r--  drivers/dma/at_hdmac.c         |   15
-rw-r--r--  drivers/dma/bcm2835-dma.c      |    2
-rw-r--r--  drivers/dma/dma-jz4740.c       |    4
-rw-r--r--  drivers/dma/dw/core.c          |   42
-rw-r--r--  drivers/dma/edma.c             |   23
-rw-r--r--  drivers/dma/ep93xx_dma.c       |    4
-rw-r--r--  drivers/dma/fsl-edma.c         |    8
-rw-r--r--  drivers/dma/fsldma.c           |  297
-rw-r--r--  drivers/dma/fsldma.h           |   32
-rw-r--r--  drivers/dma/imx-dma.c          |    2
-rw-r--r--  drivers/dma/imx-sdma.c         |   17
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c    |   14
-rw-r--r--  drivers/dma/mmp_pdma.c         |    2
-rw-r--r--  drivers/dma/mmp_tdma.c         |    2
-rw-r--r--  drivers/dma/mpc512x_dma.c      |   13
-rw-r--r--  drivers/dma/mxs-dma.c          |   10
-rw-r--r--  drivers/dma/nbpfaxi.c          | 1517
-rw-r--r--  drivers/dma/of-dma.c           |   35
-rw-r--r--  drivers/dma/omap-dma.c         |    3
-rw-r--r--  drivers/dma/pl330.c            |  964
-rw-r--r--  drivers/dma/qcom_bam_dma.c     |   20
-rw-r--r--  drivers/dma/s3c24xx-dma.c      |    3
-rw-r--r--  drivers/dma/sa11x0-dma.c       |    2
-rw-r--r--  drivers/dma/sh/Kconfig         |   24
-rw-r--r--  drivers/dma/sh/Makefile        |   16
-rw-r--r--  drivers/dma/sh/rcar-audmapp.c  |  114
-rw-r--r--  drivers/dma/sh/shdma-arm.h     |    4
-rw-r--r--  drivers/dma/sh/shdma-base.c    |  103
-rw-r--r--  drivers/dma/sh/shdma.h         |    2
-rw-r--r--  drivers/dma/sh/shdmac.c        |   15
-rw-r--r--  drivers/dma/sirf-dma.c         |    2
-rw-r--r--  drivers/dma/ste_dma40.c        |    3
-rw-r--r--  drivers/dma/sun6i-dma.c        | 1053
-rw-r--r--  drivers/dma/tegra20-apb-dma.c  |    2
38 files changed, 3526 insertions(+), 873 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8f6afbf9ba54..9b1ea0ef59af 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -393,6 +393,22 @@ config XILINX_VDMA
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
+config DMA_SUN6I
+ tristate "Allwinner A31 SoCs DMA support"
+ depends on MACH_SUN6I || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the DMA engine for Allwinner A31 SoCs.
+
+config NBPFAXI_DMA
+ tristate "Renesas Type-AXI NBPF DMA support"
+ select DMA_ENGINE
+ depends on ARM || COMPILE_TEST
+ help
+ Support for "Type-AXI" NBPF DMA IPs from Renesas
+
config DMA_ENGINE
bool
@@ -406,6 +422,7 @@ config DMA_ACPI
config DMA_OF
def_bool y
depends on OF
+ select DMA_ENGINE
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index bd9e7fa928bd..c6adb925f0b9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,5 +1,5 @@
-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
@@ -48,3 +48,5 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-y += xilinx/
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index 734ed0206cd5..b8045cd42ee1 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -7,7 +7,6 @@ TODO for slave dma
- imx-dma
- imx-sdma
- mxs-dma.c
- - dw_dmac
- intel_mid_dma
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8114731a1c62..e34024b000a4 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1040,7 +1040,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (early_bytes) {
dev_vdbg(&pl08x->adev->dev,
- "%s byte width LLIs (remain 0x%08x)\n",
+ "%s byte width LLIs (remain 0x%08zx)\n",
__func__, bd.remainder);
prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
num_llis++, &total_bytes);
@@ -1653,7 +1653,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
@@ -1662,7 +1662,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
dma_addr_t slave_addr;
dev_dbg(&pl08x->adev->dev,
- "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
__func__, period_len, buf_len,
direction == DMA_MEM_TO_DEV ? "to" : "from",
plchan->name);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c13a3bb0f594..ca9dd2613283 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -294,14 +294,16 @@ static int atc_get_bytes_left(struct dma_chan *chan)
ret = -EINVAL;
goto out;
}
- atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
- << (desc_first->tx_width);
- if (atchan->remain_desc < 0) {
+
+ count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+ << desc_first->tx_width;
+ if (atchan->remain_desc < count) {
ret = -EINVAL;
goto out;
- } else {
- ret = atchan->remain_desc;
}
+
+ atchan->remain_desc -= count;
+ ret = atchan->remain_desc;
} else {
/*
* Get residual bytes when current
@@ -893,12 +895,11 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
* @flags: tx descriptor status flags
- * @context: transfer context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
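The at_hdmac residue fix above computes the per-descriptor byte count first and validates it against the remaining total before subtracting, instead of subtracting and then testing for a negative value. A minimal sketch of that guard pattern (names are illustrative, not the driver's):

/*
 * Validate the decrement before applying it, so the remainder counter
 * can never wrap below zero on stale or corrupt descriptor state.
 */
#include <linux/errno.h>
#include <linux/types.h>

static int consume_residue(size_t *remain, size_t count)
{
	if (*remain < count)	/* would underflow: report invalid state */
		return -EINVAL;
	*remain -= count;
	return 0;
}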
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index a03602164e3e..68007974961a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -335,7 +335,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
enum dma_slave_buswidth dev_width;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 94c380f07538..6a9d89c93b1f 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -433,7 +433,7 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_desc *desc;
@@ -614,4 +614,4 @@ module_platform_driver(jz4740_dma_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 DMA driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
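The small dma-jz4740.c change above matters because the kernel only recognizes the exact license idents listed in include/linux/module.h ("GPL", "GPL v2", and a few others); an unknown string such as "GPLv2" is treated as proprietary, tainting the kernel and denying the module access to GPL-only symbols. A minimal sketch:

#include <linux/module.h>

MODULE_DESCRIPTION("minimal license-ident example");
MODULE_LICENSE("GPL v2");	/* recognized ident */
/* MODULE_LICENSE("GPLv2");	   unrecognized -> module treated as proprietary */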
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index a27ded53ab4f..1af731b83b3f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -279,6 +279,19 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
channel_set_bit(dw, CH_EN, dwc->mask);
}
+static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
+{
+ struct dw_desc *desc;
+
+ if (list_empty(&dwc->queue))
+ return;
+
+ list_move(dwc->queue.next, &dwc->active_list);
+ desc = dwc_first_active(dwc);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
+ dwc_dostart(dwc, desc);
+}
+
/*----------------------------------------------------------------------*/
static void
@@ -335,10 +348,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
* the completed ones.
*/
list_splice_init(&dwc->active_list, &list);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -467,10 +477,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
@@ -677,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
* possible, perhaps even appending to those already submitted
* for DMA. But this is hard to do in a race-free manner.
*/
- if (list_empty(&dwc->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- } else {
- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->queue);
- }
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
+ list_add_tail(&desc->desc_node, &dwc->queue);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1092,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan,
static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ unsigned long flags;
- if (!list_empty(&dwc->queue))
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (list_empty(&dwc->active_list))
+ dwc_dostart_first_queued(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
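The dw/core.c rework above makes tx_submit() only queue the descriptor and starts the hardware solely from issue_pending(), which is the contract dmaengine clients already follow. A hedged client-side sketch using the standard dmaengine helpers ("desc" is assumed to come from an earlier prep call):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int submit_and_kick(struct dma_chan *chan,
			   struct dma_async_tx_descriptor *desc)
{
	/* tx_submit() now only queues the descriptor... */
	dma_cookie_t cookie = dmaengine_submit(desc);

	if (dma_submit_error(cookie))
		return -EIO;

	/* ...the transfer is actually started here. */
	dma_async_issue_pending(chan);
	return 0;
}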
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index b512caf46944..7b65633f495e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/platform_data/edma.h>
@@ -256,8 +257,13 @@ static int edma_terminate_all(struct edma_chan *echan)
* echan->edesc is NULL and exit.)
*/
if (echan->edesc) {
+ int cyclic = echan->edesc->cyclic;
echan->edesc = NULL;
edma_stop(echan->ch_num);
+ /* Move the cyclic channel back to default queue */
+ if (cyclic)
+ edma_assign_channel_eventq(echan->ch_num,
+ EVENTQ_DEFAULT);
}
vchan_get_all_descriptors(&echan->vchan, &head);
@@ -592,7 +598,7 @@ struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long tx_flags, void *context)
+ unsigned long tx_flags)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
@@ -718,12 +724,15 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc->absync = ret;
/*
- * Enable interrupts for every period because callback
- * has to be called for every period.
+ * Enable period interrupt only if it is requested
*/
- edesc->pset[i].param.opt |= TCINTEN;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ /* Place the cyclic channel to highest priority queue */
+ edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -993,7 +1002,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan,
caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
caps->cmd_pause = true;
caps->cmd_terminate = true;
- caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
return 0;
}
@@ -1040,7 +1049,7 @@ static int edma_probe(struct platform_device *pdev)
ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
- return -EIO;
+ return ecc->dummy_slot;
}
dma_cap_zero(ecc->dma_slave.cap_mask);
@@ -1125,7 +1134,7 @@ static int edma_init(void)
}
}
- if (EDMA_CTLRS == 2) {
+ if (!of_have_populated_dt() && EDMA_CTLRS == 2) {
pdev1 = platform_device_register_full(&edma_dev_info1);
if (IS_ERR(pdev1)) {
platform_driver_unregister(&edma_driver);
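With the edma change above, per-period completion interrupts are enabled only when the client requests callbacks via DMA_PREP_INTERRUPT, and cyclic channels are moved to the highest-priority event queue. A hedged sketch of the client side, using the stock dmaengine wrapper (buffer and period sizes are placeholders):

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *prep_cyclic_ring(
		struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
		size_t period_len)
{
	/* DMA_PREP_INTERRUPT requests a callback at every period boundary */
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}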
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index cb4bf682a708..7650470196c4 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1092,7 +1092,6 @@ fail:
* @period_len: length of a single period
* @dir: direction of the operation
* @flags: tx descriptor status flags
- * @context: operation context (ignored)
*
* Prepares a descriptor for cyclic DMA operation. This means that once the
* descriptor is submitted, we will be submitting in a @period_len sized
@@ -1105,8 +1104,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir, unsigned long flags,
- void *context)
+ enum dma_transfer_direction dir, unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index b396a7fb53ab..3c5711d5fe97 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -248,11 +248,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+ void __iomem *muxaddr;
unsigned chans_per_mux, ch_off;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
if (enable)
edma_writeb(fsl_chan->edma,
@@ -516,7 +517,7 @@ err:
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
@@ -724,6 +725,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
+ unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
if (dma_spec->args_count != 2)
return NULL;
@@ -732,7 +734,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
- if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+ if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
chan->device->privatecnt++;
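The arithmetic behind the fsl-edma mux fix above: channels are split evenly across DMAMUX_NR muxes, so the mux index must be derived from channels-per-mux, not from DMAMUX_NR itself. A small sketch; the 32-channel / 2-mux numbers in the comment are illustrative assumptions:

static unsigned int mux_index(unsigned int ch, unsigned int n_chans,
			      unsigned int dmamux_nr)
{
	unsigned int chans_per_mux = n_chans / dmamux_nr;

	/*
	 * Example: n_chans = 32, dmamux_nr = 2 -> chans_per_mux = 16.
	 * For channel 20 the old code indexed muxbase[20 / 2] = muxbase[10],
	 * well past a 2-entry array; the fix yields muxbase[20 / 16] =
	 * muxbase[1].
	 */
	return ch / chans_per_mux;
}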
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e0fec68aed25..d5d6885ab341 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -396,10 +396,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
- unsigned long flags;
dma_cookie_t cookie = -EINVAL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+
+#ifdef CONFIG_PM
+ if (unlikely(chan->pm_state != RUNNING)) {
+ chan_dbg(chan, "cannot submit due to suspend\n");
+ spin_unlock_bh(&chan->desc_lock);
+ return -1;
+ }
+#endif
/*
* assign cookies to all of the software descriptors
@@ -412,7 +419,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return cookie;
}
@@ -459,6 +466,88 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
}
/**
+ * fsldma_clean_completed_descriptor - free all descriptors which
+ * have been completed and acked
+ * @chan: Freescale DMA channel
+ *
+ * This function is used on all completed and acked descriptors.
+ * All descriptors should only be freed in this function.
+ */
+static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
+ if (async_tx_test_ack(&desc->async_tx))
+ fsl_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * fsldma_run_tx_complete_actions - cleanup a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ * @cookie: Freescale DMA transaction identifier
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies.
+ */
+static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc, dma_cookie_t cookie)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ dma_cookie_t ret = cookie;
+
+ BUG_ON(txd->cookie < 0);
+
+ if (txd->cookie > 0) {
+ ret = txd->cookie;
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+ chan_dbg(chan, "LD %p callback\n", desc);
+ txd->callback(txd->callback_param);
+ }
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ return ret;
+}
+
+/**
+ * fsldma_clean_running_descriptor - move the completed descriptor from
+ * ld_running to ld_completed
+ * @chan: Freescale DMA channel
+ * @desc: the descriptor which is completed
+ *
+ * Free the descriptor directly if acked by async_tx api, or move it to
+ * queue ld_completed.
+ */
+static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /*
+ * the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx)) {
+ /*
+ * Move this descriptor to the list of descriptors which is
+ * completed, but still awaiting the 'ack' bit to be set.
+ */
+ list_add_tail(&desc->node, &chan->ld_completed);
+ return;
+ }
+
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+}
+
+/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
@@ -526,31 +615,58 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
}
/**
- * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
+ * and move them to ld_completed to free until flag 'ack' is set
* @chan: Freescale DMA channel
- * @desc: descriptor to cleanup and free
*
- * This function is used on a descriptor which has been executed by the DMA
- * controller. It will run any callbacks, submit any dependencies, and then
- * free the descriptor.
+ * This function is used on descriptors which have been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, then
+ * free these descriptors if flag 'ack' is set.
*/
-static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
- struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct fsl_desc_sw *desc, *_desc;
+ dma_cookie_t cookie = 0;
+ dma_addr_t curr_phys = get_cdar(chan);
+ int seen_current = 0;
+
+ fsldma_clean_completed_descriptor(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
+ /*
+ * do not advance past the current descriptor loaded into the
+ * hardware channel, subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /*
+ * stop the search if we reach the current descriptor and the
+ * channel is busy
+ */
+ if (desc->async_tx.phys == curr_phys) {
+ seen_current = 1;
+ if (!dma_is_idle(chan))
+ break;
+ }
+
+ cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);
- /* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
+ fsldma_clean_running_descriptor(chan, desc);
}
- /* Run any dependencies */
- dma_run_dependencies(txd);
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ fsl_chan_xfer_ld_queue(chan);
- dma_descriptor_unmap(txd);
- chan_dbg(chan, "LD %p free\n", desc);
- dma_pool_free(chan->desc_pool, desc, txd->phys);
+ if (cookie > 0)
+ chan->common.completed_cookie = cookie;
}
/**
@@ -617,13 +733,14 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
chan_dbg(chan, "free all channel resources\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
+ spin_unlock_bh(&chan->desc_lock);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -842,7 +959,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
{
struct dma_slave_config *config;
struct fsldma_chan *chan;
- unsigned long flags;
int size;
if (!dchan)
@@ -852,7 +968,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* Halt the DMA engine */
dma_halt(chan);
@@ -860,9 +976,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return 0;
case DMA_SLAVE_CONFIG:
@@ -904,11 +1021,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
}
/**
@@ -919,6 +1035,17 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
+ spin_unlock_bh(&chan->desc_lock);
+
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -996,52 +1123,18 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
static void dma_