// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *	and Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_DEBUGFS
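/* Report how many TCE entries are currently allocated (set bits in it_map). */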
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;

	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
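
/*
 * Expose a per-table debugfs directory, named after the table's LIOBN
 * (it_index), containing the allocation weight and the main geometry fields.
 */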
static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}
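
/* Tear down the per-table debugfs directory created by iommu_debugfs_add(). */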
static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
	debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
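
/*
 * The "iommu=" boot option: "novmerge" disables virtual merging of adjacent
 * scatterlist entries into a single DMA segment, "vmerge" (the default)
 * enables it.
 */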
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}
__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads, but
 * with 4 pools a naive mapping would put all primary threads in the same pool.
 */
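/*
 * Rough illustration: with SMT4 the primary threads are typically CPUs
 * 0, 4, 8, ..., so a simple "cpu % 4" pool choice would put them all in
 * pool 0, while hash_32(cpu, IOMMU_POOL_HASHBITS) scatters them across
 * the pools.
 */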
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

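/*
 * The "fail_iommu=" boot option takes the common fault-injection tuple,
 * "<interval>,<probability>,<space>,<times>", as parsed by setup_fault_attr().
 */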
static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
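
/* Fault-injection hook: decide whether this device's next DMA mapping should be forced to fail. */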
static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail