/*
 * offload engine driver for the Intel XScale series of I/O processors
* Copyright © 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel XScale(R) family of I/O Processors (IOP 32x, 33x, 134x)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>
#include <mach/adma.h>
#include "dmaengine.h"
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
container_of(tx, struct iop_adma_desc_slot, async_tx)
/**
* iop_adma_free_slots - flags descriptor slots for reuse
* @slot: Slot to free
* Caller must hold &iop_chan->lock while calling this function
*/
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
int stride = slot->slots_per_op;
while (stride--) {
slot->slots_per_op = 0;
slot = list_entry(slot->slot_node.next,
struct iop_adma_desc_slot,
slot_node);
}
}
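
/**
 * iop_adma_run_tx_complete_actions - invoke the completion callback and
 *	kick off dependent operations for a finished descriptor
 * @desc: descriptor whose transaction has completed
 * @iop_chan: channel the descriptor was submitted on
 * @cookie: last completed cookie seen so far
 *
 * Returns the updated last-completed cookie. The client callback runs in
 * this context and must not sleep or submit new operations to the channel.
 */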
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
struct dma_async_tx_descriptor *tx = &desc->async_tx;
BUG_ON(tx->cookie < 0);
if (tx->cookie > 0) {
cookie = tx->cookie;
tx->cookie = 0;
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
if (desc->group_head)
desc->group_head = NULL;
}
/* run dependent operations */
dma_run_dependencies(tx);
return cookie;
}
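
/**
 * iop_adma_clean_slot - recycle a descriptor slot once the client has acked it
 * @desc: descriptor to test and potentially free
 * @iop_chan: channel that owns the descriptor chain
 *
 * Returns 1 if @desc is the last descriptor in the chain (it is kept in
 * place so new operations can be appended to it), otherwise 0. Unacked
 * descriptors are also left in place so the client can still attach
 * dependent operations.
 */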
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan)
{
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
if (!async_tx_test_ack(&desc->async_tx))
return 0;
/* leave the last descriptor in the chain
* so we can append to it
*/
if (desc->chain_node.next == &iop_chan->chain)
return 1;
dev_dbg(iop_chan->device->common.dev,
"\tfree slot: %d slots_per_op: %d\n",
desc->idx, desc->slots_per_op);
list_del(&desc->chain_node);
iop_adma_free_slots(desc);
return 0;
}
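
/**
 * __iop_adma_slot_cleanup - reclaim descriptors the hardware has completed
 * @iop_chan: channel to scan
 *
 * Walks the chain from the oldest descriptor up to the one currently loaded
 * in hardware, running completion actions and freeing slots along the way.
 * Caller must hold &iop_chan->lock; a minimal usage sketch, assuming a
 * locked wrapper of the kind this driver pairs with it:
 *
 *	spin_lock_bh(&iop_chan->lock);
 *	__iop_adma_slot_cleanup(iop_chan);
 *	spin_unlock_bh(&iop_chan->lock);
 */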
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
dma_cookie_t cookie = 0;
u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
int busy = iop_chan_is_busy(iop_chan);
int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
/* free completed slots from the chain starting with
* the oldest descriptor
*/
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
"this_desc: %#x next_desc: %#x ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
iter->async_tx.phys, iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
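		/* warm the cache with the next descriptor before it is used */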
prefetch(_iter);
prefetch(&_iter->async_tx);
/* do not advance past the current descriptor loaded into the
* hardware channel, subsequent descriptors are either in
* process or have not been submitted
*/
if (seen_current)
break;
/* stop the search if we reach the current descriptor and the
* channel is busy, or if it appears that the current descriptor
* needs to be re-read (i.e. has been appended to)
*/
if (iter->async_tx.phys == current_desc) {
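			/* the descriptor loaded in hardware can match at
			 * most one slot in the chain
			 */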
BUG_ON(seen_current++);
if (busy || iop_desc_get_next_desc(iter))
break;
}
/* detect the start of a group transaction */
if (!slot_cnt && !slots_per_op) {
slot_cnt = iter->slot_cnt;
slots_per_op = iter->slots_per_op;
if (slot_cnt <= slots_per_op) {
slot_cnt = 0;
slots_per_op = 0;
}
}
if (slot_cnt) {
pr_debug("\tgroup++\n");
if (!grp_start)
grp_start = iter;
slot_cnt -= slots_per_op;
}
/* all the members of a group are complete */
if (slots_per_op != 0 && slot_cnt == 0) {
struct iop_adma_desc_slot *grp_iter, *_grp_iter;
int end_of_chain = 0;
pr_debug("\tgroup end\n");
/* collect the total results */
if (grp_start->xor_check_result) {
u32 zero_sum_result = 0;
slot_cnt = grp_start->slot_cnt;
grp_iter = grp_start;
list_for_each_entry_from(grp_iter,
&iop_chan->chain, chain_node) {
zero_sum_result |=
iop_desc_get_zero_result(grp_iter);
pr_debug("\titer%d result: %d\n",
grp_iter->idx, zero_sum_result);
slot_cnt -= slots_per_op;
if (slot_cnt == 0)
break;
}
pr_debug("\tgrp_start->xor_check_result: %p\n",
grp_start->xor_check_result);
*grp_start->xor_check_result = zero_sum_result;
}
/* clean up the group */
slot_cnt = grp_start->slot_cnt;
grp_iter = grp_start;
list_for_each_entry_safe_from(grp_iter, _grp_iter,
&iop_chan->chain, chain_node) {
cookie = iop_adma_run_tx_complete_actions(
grp_iter, iop_chan, cookie);
slot_cnt -= slots_per_op;
end_of_chain = iop_adma_clean_slot(grp_iter,
iop_chan);
if (slot_cnt == 0 || end_of_chain)
break;
}
/* the group should be complete at this point */
BUG_ON(slot_cnt);
slots_per_op = 0;
grp_start = NULL;
if (end_of_chain)
break;
else
continue;
} else if (slots_per_op) /* wait for group completion */
continue;
		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
					iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}