Diffstat (limited to 'drivers/dma')
41 files changed, 2184 insertions, 553 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d2286c7f7222..0b1dfb5bf2d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -218,6 +218,20 @@ config FSL_EDMA
	  multiplexing capability for DMA request sources(slot).
	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+	tristate "NXP Layerscape qDMA engine support"
+	depends on ARM || ARM64
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select DMA_ENGINE_RAID
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+	  Channel virtualization is supported through enqueuing of DMA jobs to,
+	  or dequeuing DMA jobs from, different work queues.
+	  This module can be found on NXP Layerscape SoCs.
+	  The qdma driver only works on SoCs with a DPAA hardware block.
+
 config FSL_RAID
	tristate "Freescale RAID engine Support"
	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 09571a81353d..6126e1c3a875 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 01d936c9fe89..a0a9cd76c1d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
-	LIST_HEAD(tmp_list);
 
	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
@@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan)
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;
 
-	LIST_HEAD(list);
-
	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
	spin_lock_irqsave(&atchan->lock, flags);
@@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan)
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;
 
-	LIST_HEAD(list);
-
	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
	if (!atc_chan_is_paused(atchan))
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ec8a291d62ba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -2,9 +2,6 @@
 /*
  * BCM2835 DMA engine support
  *
- * This driver only supports cyclic DMA transfers
- * as needed for the I2S module.
- *
  * Author: Florian Meier <florian.meier@koalo.de>
  *         Copyright 2013
  *
@@ -42,7 +39,6 @@
 struct bcm2835_dmadev {
	struct dma_device ddev;
-	spinlock_t lock;
	void __iomem *base;
	struct device_dma_parameters dma_parms;
 };
@@ -64,7 +60,6 @@ struct bcm2835_cb_entry {
 
 struct bcm2835_chan {
	struct virt_dma_chan vc;
-	struct list_head node;
 
	struct dma_slave_config	cfg;
	unsigned int dreq;
@@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
		return NULL;
 
	/* allocate and setup the descriptor. */
-	d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
-		    gfp);
+	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;
@@ -406,39 +400,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
	}
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static void bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-	unsigned long cs;
+	void __iomem *chan_base = c->chan_base;
	long int timeout = 10000;
 
-	cs = readl(chan_base + BCM2835_DMA_CS);
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
+	/*
+	 * A zero control block address means the channel is idle.
+	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
+	 */
+	if (!readl(chan_base + BCM2835_DMA_ADDR))
+		return;
 
	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);
 
	/* Wait for any current AXI transfer to complete */
-	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+	while ((readl(chan_base + BCM2835_DMA_CS) &
+		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();
-		cs = readl(chan_base + BCM2835_DMA_CS);
-	}
 
-	/* We'll un-pause when we set of our next DMA */
+	/* Peripheral might be stuck and fail to signal AXI write responses */
	if (!timeout)
-		return -ETIMEDOUT;
-
-	if (!(cs & BCM2835_DMA_ACTIVE))
-		return 0;
+		dev_err(c->vc.chan.device->dev,
+			"failed to complete outstanding writes\n");
 
-	/* Terminate the control block chain */
-	writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-	/* Abort the whole DMA */
-	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-	       chan_base + BCM2835_DMA_CS);
-
-	return 0;
+	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 }
 
 static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
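Note: the pause-and-drain loop in bcm2835_dma_abort() above is a classic poll-with-timeout. Assuming the atomic context that the cpu_relax() busy-wait implies, the same wait could be written with readl_poll_timeout_atomic() from <linux/iopoll.h>; a minimal sketch against the register names used by the patch, giving up after roughly 10 ms where the patch counts 10000 polls (not the author's code):

	u32 cs;

	/* Pause the channel, then busy-poll until outstanding AXI
	 * writes have drained. */
	writel(0, chan_base + BCM2835_DMA_CS);
	if (readl_poll_timeout_atomic(chan_base + BCM2835_DMA_CS, cs,
				      !(cs & BCM2835_DMA_WAITING_FOR_WRITES),
				      0, 10000))
		dev_err(c->vc.chan.device->dev,
			"failed to complete outstanding writes\n");
	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);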
@@ -476,8 +463,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Acknowledge interrupt */
-	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+	/*
+	 * Clear the INT flag to receive further interrupts. Keep the channel
+	 * active in case the descriptor is cyclic or in case the client has
+	 * already terminated the descriptor and issued a new one. (May happen
+	 * if this IRQ handler is threaded.) If the channel is finished, it
+	 * will remain idle despite the ACTIVE flag being set.
+	 */
+	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+	       c->chan_base + BCM2835_DMA_CS);
 
	d = c->desc;
 
@@ -485,11 +479,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
	if (d->cyclic) {
		/* call the cyclic callback */
		vchan_cyclic_callback(&d->vd);
-
-		/* Keep the DMA engine running */
-		writel(BCM2835_DMA_ACTIVE,
-		       c->chan_base + BCM2835_DMA_CS);
-	} else {
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
		vchan_cookie_complete(&c->desc->vd);
		bcm2835_dma_start_desc(c);
	}
@@ -507,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 
	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
+	/*
+	 * Control blocks are 256 bit in length and must start at a 256 bit
+	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
+	 */
	c->cb_pool = dma_pool_create(dev_name(dev), dev,
-				     sizeof(struct bcm2835_dma_cb), 0, 0);
+				     sizeof(struct bcm2835_dma_cb), 32, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}
@@ -777,39 +771,16 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
 static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 {
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
	unsigned long flags;
-	int timeout = 10000;
	LIST_HEAD(head);
 
	spin_lock_irqsave(&c->vc.lock, flags);
 
-	/* Prevent this channel being scheduled */
-	spin_lock(&d->lock);
-	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	/*
-	 * Stop DMA activity: we assume the callback will not be called
-	 * after bcm_dma_abort() returns (even if it does, it will see
-	 * c->desc is NULL and exit.)
-	 */
+	/* stop DMA activity */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
-		bcm2835_dma_abort(c->chan_base);
-
-		/* Wait for stopping */
-		while (--timeout) {
-			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-						BCM2835_DMA_ACTIVE))
-				break;
-
-			cpu_relax();
-		}
-
-		if (!timeout)
-			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+		bcm2835_dma_abort(c);
	}
 
	vchan_get_all_descriptors(&c->vc, &head);
@@ -837,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
 
	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);
-	INIT_LIST_HEAD(&c->node);
 
	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
@@ -940,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
-	spin_lock_init(&od->lock);
 
	platform_set_drvdata(pdev, od);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 15b2453d2647..ffc0adc2f6ce 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
	struct axi_dmac_desc *desc;
	unsigned int i;
 
-	desc = kzalloc(sizeof(struct axi_dmac_desc) +
-		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a8b6225faa12..9ce0a386225b 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
	if (!soc_data)
		return -EINVAL;
 
-	jzdma = devm_kzalloc(dev, sizeof(*jzdma)
-			+ sizeof(*jzdma->chan) * soc_data->nb_channels,
-			GFP_KERNEL);
+	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;
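Note: three hunks above (bcm2835-dma, dma-axi-dmac, dma-jz4780) replace open-coded "sizeof(*p) + n * sizeof(element)" allocations with struct_size(). The helper, defined in <linux/overflow.h>, computes the size of a structure with a trailing flexible array member and saturates to SIZE_MAX on arithmetic overflow, so the allocation fails instead of being silently undersized. A minimal userspace sketch of the idea; the struct is hypothetical, loosely modeled on bcm2835_desc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	unsigned int frames;
	uint32_t cb_list[];		/* flexible array member */
};

/* sketch of struct_size(d, cb_list, n): header plus n elements,
 * saturating to SIZE_MAX if the arithmetic overflows */
static size_t sketch_struct_size(size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(uint32_t), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct desc), &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	struct desc *d = calloc(1, sketch_struct_size(8));

	if (!d)
		return 1;
	d->frames = 8;
	printf("allocated %zu bytes\n", sketch_struct_size(8));
	free(d);
	return 0;
}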
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..50221d467d86 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -200,15 +200,20 @@ struct dmatest_done {
	wait_queue_head_t	*wait;
 };
 
+struct dmatest_data {
+	u8		**raw;
+	u8		**aligned;
+	unsigned int	cnt;
+	unsigned int	off;
+};
+
 struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
-	u8			**srcs;
-	u8			**usrcs;
-	u8			**dsts;
-	u8			**udsts;
+	struct dmatest_data	src;
+	struct dmatest_data	dst;
	enum dma_transaction_type type;
	wait_queue_head_t done_wait;
	struct dmatest_done test_done;
@@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
 }
 
+static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
+{
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++)
+		kfree(d->raw[i]);
+
+	kfree(d->aligned);
+	kfree(d->raw);
+}
+
+static void dmatest_free_test_data(struct dmatest_data *d)
+{
+	__dmatest_free_test_data(d, d->cnt);
+}
+
+static int dmatest_alloc_test_data(struct dmatest_data *d,
+		unsigned int buf_size, u8 align)
+{
+	unsigned int i = 0;
+
+	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->raw)
+		return -ENOMEM;
+
+	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!d->aligned)
+		goto err;
+
+	for (i = 0; i < d->cnt; i++) {
+		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+		if (!d->raw[i])
+			goto err;
+
+		/* align to alignment restriction */
+		if (align)
+			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
+		else
+			d->aligned[i] = d->raw[i];
+	}
+
+	return 0;
+err:
+	__dmatest_free_test_data(d, i);
+	return -ENOMEM;
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
  * offsets for a given operation type until it is told to exit by
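Note: dmatest_alloc_test_data() keeps two pointer arrays per buffer set because kmalloc() guarantees no particular alignment beyond the slab minimum: raw[] holds what must eventually be kfree()d, while aligned[] holds the same pointer rounded up with PTR_ALIGN() to the engine's alignment restriction. Over-allocating by 'align' bytes guarantees the rounded pointer still has buf_size usable bytes behind it. A standalone userspace sketch of the pattern, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* mirror of the kernel's PTR_ALIGN(): round p up to the next multiple
 * of align, which must be a power of two */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t buf_size = 4096;
	uintptr_t align = 64;
	void *raw, *aligned;

	/* over-allocate so the aligned pointer keeps buf_size bytes */
	raw = malloc(buf_size + align);
	if (!raw)
		return 1;

	aligned = ptr_align(raw, align);
	printf("raw=%p aligned=%p\n", raw, aligned);

	free(raw);	/* always free the raw pointer, never the aligned one */
	return 0;
}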
@@ -511,8 +563,9 @@ static int dmatest_func(void *data)
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
-	int			src_cnt;
-	int			dst_cnt;
+	unsigned int		buf_size;
+	struct dmatest_data	*src;
+	struct dmatest_data	*dst;
	int			i;
	ktime_t	ktime, start, diff;
	ktime_t	filltime = 0;
@@ -535,25 +588,27 @@ static int dmatest_func(void *data)
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
+	src = &thread->src;
+	dst = &thread->dst;
 
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
-		src_cnt = dst_cnt = 1;
+		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
-		src_cnt = dst_cnt = 1;
+		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
-		dst_cnt = 1;
+		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
+		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
-		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
-		dst_cnt = 2;
+		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
+		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;
@@ -561,75 +616,38 @@ static int dmatest_func(void *data)
		if (!pq_coefs)
			goto err_thread_type;
 
-		for (i = 0; i < src_cnt; i++)
+		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;
 
	/* Check if buffer count fits into map count variable (u8) */
-	if ((src_cnt + dst_cnt) >= 255) {
+	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
-		       src_cnt + dst_cnt);
+		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}
 
-	if (1 << align > params->buf_size) {
+	buf_size = params->buf_size;
+	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
-		       params->buf_size, 1 << align);
+		       buf_size, 1 << align);
		goto err_free_coefs;
	}
 
-	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->srcs)
+	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		goto err_free_coefs;
 
-	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->usrcs)
-		goto err_usrcs;
-
-	for (i = 0; i < src_cnt; i++) {
-		thread->usrcs[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->usrcs[i])
-			goto err_srcbuf;
-
-		/* align srcs to alignment restriction */
-		if (align)
-			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
-		else
-			thread->srcs[i] = thread->usrcs[i];
-	}
-	thread->srcs[i] = NULL;
-
-	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->dsts)
-		goto err_dsts;
-
-	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
-	if (!thread->udsts)
-		goto err_udsts;
-
-	for (i = 0; i < dst_cnt; i++) {
-		thread->udsts[i] = kmalloc(params->buf_size + align,
-					   GFP_KERNEL);
-		if (!thread->udsts[i])
-			goto err_dstbuf;
-
-		/* align dsts to alignment restriction */
-		if (align)
-			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
-		else
-			thread->dsts[i] = thread->udsts[i];
-	}
-	thread->dsts[i] = NULL;
+	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
+		goto err_src;
 
	set_user_nice(current, 10);
 
-	srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
-		goto err_dstbuf;
+		goto err_dst;
 
-	dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;
@@ -644,21 +662,21 @@
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
-		unsigned int src_off, dst_off, len;
+		unsigned int len;
 
		total_tests++;
 
		if (params->transfer_size) {
-			if (params->transfer_size >= params->buf_size) {
+			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
-				       params->transfer_size, params->buf_size);
+				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
-			len = params->buf_size;
+			len = buf_size;
		} else {
-			len = dmatest_random() % params->buf_size + 1;
+			len = dmatest_random() % buf_size + 1;
		}
 
		/* Do not alter transfer size explicitly defined by user */
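Note: the "/* force odd to ensure dst = src */" comments rely on the identity that XOR-ing an odd number of identical buffers reproduces the buffer: x ^ x = 0, so pairs cancel and one copy survives, letting the test validate the XOR (and PQ) result against the known source pattern. A tiny standalone demonstration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t x = 0xA5;	/* stand-in for one source buffer byte */
	uint8_t acc = 0;
	int n;

	/* XOR n copies of x: pairs cancel, so acc is x for odd n, 0 for even */
	for (n = 1; n <= 6; n++) {
		acc ^= x;
		printf("n=%d acc=0x%02x\n", n, acc);
		assert(acc == ((n & 1) ? x : 0));
	}
	return 0;
}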
@@ -670,59 +688,59 @@
		total_len += len;
 
		if (params->norandom) {
-			src_off = 0;
-			dst_off = 0;
+			src->off = 0;
+			dst->off = 0;
		} else {
-			src_off = dmatest_random() % (params->buf_size - len + 1);
-			dst_off = dmatest_random() % (params->buf_size - len + 1);
+			src->off = dmatest_random() % (buf_size - len + 1);
+			dst->off = dmatest_random() % (buf_size - len + 1);
 
-			src_off = (src_off >> align) << align;
-			dst_off = (dst_off >> align) << align;
+			src->off = (src->off >> align) << align;
+			dst->off = (dst->off >> align) << align;
		}
 
		if (!params->noverify) {
			start = ktime_get();
-			dmatest_init_srcs(thread->srcs, src_off, len,
-					  params->buf_size, is_memset);
-			dmatest_init_dsts(thread->dsts, dst_off, len,
-					  params->buf_size, is_memset);
+			dmatest_init_srcs(src->aligned, src->off, len,
+					  buf_size, is_memset);
+			dmatest_init_dsts(dst->aligned, dst->off, len,
+					  buf_size, is_memset);
 
			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}
 
-		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
+		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
-			       src_off, dst_off, len, ret);
+			       src->off, dst->off, len, ret);
			continue;
		}
 
-		um->len = params->buf_size;
-		for (i = 0; i < src_cnt; i++) {
-			void *buf = thread->srcs[i];
+		um->len = buf_size;
+		for (i = 0; i < src->cnt; i++) {
+			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);
 
			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
-			srcs[i] = um->addr[i] + src_off;
+			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				dmaengine_unmap_put(um);
				result("src mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
				failed_tests++;
				continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
-		dsts = &um->addr[src_cnt];
-		for (i = 0; i < dst_cnt; i++) {
-			void *buf = thread->dsts[i];
+		dsts = &um->addr[src->cnt];
+		for (i = 0; i < dst->cnt; i++) {
+			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);
 
@@ -732,7 +750,7 @@
			if (ret) {
				dmaengine_unmap_put(um);
				result("dst mapping error", total_tests,
-				       src_off, dst_off, len, ret);
+				       src->off, dst->off, len, ret);
				failed_tests++;
				continue;
			}
@@ -741,30 +759,30 @@
 
		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
-							 dsts[0] + dst_off,
+							 dsts[0] + dst->off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
-						dsts[0] + dst_off,
-						*(thread->srcs[0] + src_off),
+						dsts[0] + dst->off,
+						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
-						      dsts[0] + dst_off,
-						      srcs, src_cnt,
+						      dsts[0] + dst->off,
+						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
-			for (i = 0; i < dst_cnt; i++)
-				dma_pq[i] = dsts[i] + dst_off;
+			for (i = 0; i < dst->cnt; i++)
+				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
-						     src_cnt, pq_coefs,
+						     src->cnt, pq_coefs,
						     len, flags);
		}
 
		if (!tx) {
			dmaengine_unmap_put(um);
-			result("prep error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("prep error", total_tests, src->off,
+			       dst->off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
@@ -777,8 +795,8 @@
 
		if (dma_submit_error(cookie)) {
			dmaengine_unmap_put(um);
-			result("submit error", total_tests, src_off,
-			       dst_off, len, ret);
+			result("submit error", total_tests, src->off,
+			       dst->off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
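Note: the prep and submit calls above follow the standard dmaengine client sequence: prepare a descriptor, submit it to obtain a cookie, kick the channel, then wait on the cookie. dmatest calls the raw device_prep_dma_* hooks directly; an ordinary client would use the inline wrappers. A condensed sketch of the memcpy case, where chan, dst_dma, src_dma, and len are assumed to be set up already and error paths are trimmed:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* 1. prepare a descriptor for the copy */
	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	/* 2. submit it; the cookie identifies this transaction */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* 3. tell the engine to start working through its queue */
	dma_async_issue_pending(chan);

	/* 4. busy-wait for completion of this cookie */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;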
@@ -793,58 +811,58 @@
		dmaengine_unmap_put(um);
 
		if (!done->done) {
-			result("test timed out", total_tests, src_off, dst_off,
+			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_COMPLETE) {
			result(status == DMA_ERROR ?
			       "completion error status" :
-			       "completion busy status", total_tests, src_off,
-			       dst_off, len, ret);
+			       "completion busy status", total_tests, src->off,
+			       dst->off, len, ret);
			failed_tests++;
			continue;
		}
 
		if (params->noverify) {
-			verbose_result("test passed", total_tests, src_off,
-				       dst_off, len, 0);
+			verbose_result("test passed", total_tests, src->off,
+				       dst->off, len, 0);
			continue;
		}
 
		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
-		error_count = dmatest_verify(thread->srcs, 0, src_off,
+		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off,
-				src_off + len, src_off,
+		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
-		error_count += dmatest_verify(thread->srcs, src_off + len,
-				params->buf_size, src_off + len,
+		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);
 
		pr_debug("%s: verifying dest buffer...\n", current->comm);
-		error_count += dmatest_verify(thread->dsts, 0, dst_off,
+		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);
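Note: the dmatest_verify() calls split each buffer into three regions (the bytes before the transfer window, the window itself, and the bytes after it) and check each region against the pattern it should contain, catching both corrupted copies and stray writes outside the window. A toy userspace illustration of the idea:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

int main(void)
{
	unsigned char src[BUF_SIZE], dst[BUF_SIZE];
	size_t off = 16, len = 24, i;

	memset(src, 0xAA, BUF_SIZE);	/* stand-in for PATTERN_SRC */
	memset(dst, 0x55, BUF_SIZE);	/* stand-in for PATTERN_DST */

	memcpy(dst + off, src + off, len);	/* the "DMA" transfer */

	for (i = 0; i < off; i++)		/* region before the window */
		assert(dst[i] == 0x55);
	for (i = off; i < off + len; i++)	/* the copied window */
		assert(dst[i] == 0xAA);
	for (i = off + len; i < BUF_SIZE; i++)	/* region after the window */
		assert(dst[i] == 0x55);

	puts("all three regions verified");
	return 0;
}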
