// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void free_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_global_cached_iovas(struct iova_domain *iovad);
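/* Convert an rbtree node back to the struct iova that contains it. */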
static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}
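/**
 * init_iova_domain - initialise an IOVA allocation domain
 * @iovad: domain to initialise
 * @granule: allocation granularity in bytes; must be a power of two no
 *	larger than PAGE_SIZE
 * @start_pfn: lowest IOVA PFN this domain may hand out
 *
 * Initialises the rbtree with the anchor node as its only entry and
 * points both allocation cursors at it.
 */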
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
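/*
 * Choose the cached starting node for an allocation: the 32-bit cursor when
 * the caller's limit does not exceed dma_32bit_pfn, the general cursor
 * otherwise.
 */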
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}
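/* Cache a newly inserted range as the starting point for later allocations. */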
static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}
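/*
 * On free, step any cache cursor that sat at or below the freed range up to
 * the next node, so the new hole is visible to later allocations, and reset
 * max32_alloc_size so that 32-bit allocations are allowed to retry.
 */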
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}
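/*
 * Find a node near limit_pfn from which to start an allocation walk, rather
 * than always walking all the way down from the anchor; limits above the
 * 32-bit boundary simply use the anchor node.
 */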
static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * Ideally what we'd like to judge here is whether limit_pfn is close
	 * enough to the highest-allocated IOVA that starting the allocation
	 * walk from the anchor node will be quicker than this initial work to
	 * find an exact starting point (especially if that ends up being the
	 * anchor node anyway). This is an incredibly crude approximation which
	 * only really helps the most likely case, but is at least trivially easy.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}
/* Insert the iova into the domain rbtree; the caller must hold the rbtree lock. */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1);	/* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
static int __alloc_and_insert_iova_range