/*
* Copyright © 2006-2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
 * This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL
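/*
 * The anchor is kept in the rbtree permanently: it sorts as the rightmost
 * node, so the cached nodes below it always have a valid successor and
 * allocation walks have a fixed upper boundary to start down from.
 */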
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
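/**
 * init_iova_domain - initialise an IOVA domain
 * @iovad: domain to initialise
 * @granule: allocation granularity, normally the smallest IOMMU page size
 * @start_pfn: lowest PFN (in units of @granule) that may be allocated
 *
 * Deferred flushing is not set up here; see init_iova_flush_queue().
 */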
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn)
{
/*
* IOVA granularity will normally be equal to the smallest
* supported IOMMU page size; both *must* be capable of
* representing individual CPU pages exactly.
*/
BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached_node = &iovad->anchor.node;
iovad->cached32_node = &iovad->anchor.node;
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->flush_cb = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
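/*
 * Illustrative example (not part of this file): a caller would typically
 * match the granule to the CPU page size and reserve PFN 0, e.g.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, PAGE_SIZE, 1);
 */

/* Tear down a domain's flush queues, destroying any still-queued entries. */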
static void free_iova_flush_queue(struct iova_domain *iovad)
{
if (!iovad->fq)
return;
if (timer_pending(&iovad->fq_timer))
del_timer(&iovad->fq_timer);
fq_destroy_all_entries(iovad);
free_percpu(iovad->fq);
iovad->fq = NULL;
iovad->flush_cb = NULL;
iovad->entry_dtor = NULL;
}
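/**
 * init_iova_flush_queue - enable deferred flushing for a domain
 * @iovad: domain to enable deferred flushing for
 * @flush_cb: callback that flushes the IOTLB for the whole domain
 * @entry_dtor: optional destructor called for each entry once it is flushed
 *
 * Allocates one flush queue per possible CPU. Returns 0 on success or
 * -ENOMEM if the per-CPU allocation fails.
 */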
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
int cpu;
atomic64_set(&iovad->fq_flush_start_cnt, 0);
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
iovad->fq = alloc_percpu(struct iova_fq);
if (!iovad->fq)
return -ENOMEM;
iovad->flush_cb = flush_cb;
iovad->entry_dtor = entry_dtor;
for_each_possible_cpu(cpu) {
struct iova_fq *fq;
fq = per_cpu_ptr(iovad->fq, cpu);
fq->head = 0;
fq->tail = 0;
spin_lock_init(&fq->lock);
}
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
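/*
 * Choose the starting point for an allocation walk: requests that must stay
 * below the 32-bit boundary start from the 32-bit cache, all others from
 * the general cache.
 */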
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
if (limit_pfn <= iovad->dma_32bit_pfn)
return iovad->cached32_node;
return iovad->cached_node;
}
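/* Remember the most recently inserted range in the appropriate cache. */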
static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
if (new->pfn_hi < iovad->dma_32bit_pfn)
iovad->cached32_node = &new->node;
else
iovad->cached_node = &new->node;
}
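/*
 * On removal, advance a cache past the freed node when the freed range lies
 * at or above the cached position, so the freed space becomes visible to
 * subsequent allocations. The permanently-resident anchor guarantees that
 * rb_next() never returns NULL here.
 */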
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
if (free->pfn_hi < iovad->dma_32bit_pfn &&
free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached32_node = rb_next(&free->node);
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached_node = rb_next(&free->node);
}
/* Insert the iova into the domain rbtree; caller must hold the rbtree writer lock. */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
struct rb_node *start)
{
struct rb_node **new, *parent = NULL;
new = (start) ? &start : &(root->rb_node);
/* Figure out where to put new node */
while (*new) {
struct iova *this = rb_entry(*new, struct iova, node);
parent = *new;
if (iova->pfn_lo < this->pfn_lo)