author	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:48:57 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:48:57 -0700
commit	34af78c4e616c359ed428d79fe4758a35d2c5473 (patch)
tree	d7c27bfe3c9aa5b063bd630e27a70ff4a0db9069 /drivers
parent	6f2689a7662809ff39f2b24e452d11569c21ea2f (diff)
parent	e17c6debd4b2d2d474074f83946f8c6522587566 (diff)
Merge tag 'iommu-updates-v5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 - IOMMU Core changes:
     - Removal of aux domain related code as it is basically dead and
       will be replaced by the iommu-fd framework
     - Split of iommu_ops to carry domain-specific callbacks separately
       (see the sketch below)
     - Cleanup to remove useless ops->capable implementations
     - Improve 32-bit free space estimate in the iova allocator

 - Intel VT-d updates:
     - Various cleanups of the driver
     - Support for ATS of SoC-integrated devices listed in the ACPI/SATC table

 - ARM SMMU updates:
     - Fix SMMUv3 soft lockup during a continuous stream of events
     - Fix error path for Qualcomm SMMU probe()
     - Rework SMMU IRQ setup to prepare the ground for PMU support
     - Minor cleanups and refactoring

 - AMD IOMMU driver:
     - Some minor cleanups and error-handling fixes

 - Rockchip IOMMU driver:
     - Use standard driver registration

 - MSM IOMMU driver:
     - Minor cleanup and change to standard driver registration

 - Mediatek IOMMU driver:
     - Fixes for IOTLB flushing logic

* tag 'iommu-updates-v5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (47 commits)
  iommu/amd: Improve amd_iommu_v2_exit()
  iommu/amd: Remove unused struct fault.devid
  iommu/amd: Clean up function declarations
  iommu/amd: Call memunmap in error path
  iommu/arm-smmu: Account for PMU interrupts
  iommu/vt-d: Enable ATS for the devices in SATC table
  iommu/vt-d: Remove unused function intel_svm_capable()
  iommu/vt-d: Add missing "__init" for rmrr_sanity_check()
  iommu/vt-d: Move intel_iommu_ops to header file
  iommu/vt-d: Fix indentation of goto labels
  iommu/vt-d: Remove unnecessary prototypes
  iommu/vt-d: Remove unnecessary includes
  iommu/vt-d: Remove DEFER_DEVICE_DOMAIN_INFO
  iommu/vt-d: Remove domain and devinfo mempool
  iommu/vt-d: Remove iova_cache_get/put()
  iommu/vt-d: Remove finding domain in dmar_insert_one_dev_info()
  iommu/vt-d: Remove intel_iommu::domains
  iommu/mediatek: Always tlb_flush_all when each PM resume
  iommu/mediatek: Add tlb_lock in tlb_flush_all
  iommu/mediatek: Remove the power status checking in tlb flush all
  ...
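The iommu_ops split mentioned above moves the per-domain callbacks out of struct iommu_ops into a separate struct iommu_domain_ops, referenced through the new default_domain_ops member; every driver conversion in the diff below follows the same pattern. A minimal sketch of the resulting driver-side layout, assuming <linux/iommu.h> as of this series; the example_* callbacks and EXAMPLE_PGSIZES are hypothetical placeholders, not functions from any of the converted drivers:

static const struct iommu_ops example_iommu_ops = {
	/* per-device and per-instance callbacks stay in iommu_ops */
	.capable	= example_capable,
	.domain_alloc	= example_domain_alloc,
	.probe_device	= example_probe_device,
	.release_device	= example_release_device,
	.device_group	= example_device_group,
	.pgsize_bitmap	= EXAMPLE_PGSIZES,
	/* per-domain callbacks now live in a separate iommu_domain_ops */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= example_attach_dev,
		.map_pages	= example_map_pages,
		.unmap_pages	= example_unmap_pages,
		.flush_iotlb_all = example_flush_iotlb_all,
		.iotlb_sync	= example_iotlb_sync,
		.iova_to_phys	= example_iova_to_phys,
		.free		= example_domain_free,
	}
};

Callers that only need the supported page sizes now read pgsize_bitmap straight from the iommu_domain rather than through the ops pointer, as the first nouveau hunk below shows.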
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c |   2
-rw-r--r--  drivers/iommu/amd/amd_iommu.h                      |   4
-rw-r--r--  drivers/iommu/amd/init.c                           |  18
-rw-r--r--  drivers/iommu/amd/iommu.c                          |  23
-rw-r--r--  drivers/iommu/amd/iommu_v2.c                       |  37
-rw-r--r--  drivers/iommu/apple-dart.c                         |  20
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c        |  45
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c              | 113
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h              |   5
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c            |  28
-rw-r--r--  drivers/iommu/dma-iommu.c                          |   4
-rw-r--r--  drivers/iommu/exynos-iommu.c                       |  14
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c                    |  10
-rw-r--r--  drivers/iommu/intel/debugfs.c                      |   6
-rw-r--r--  drivers/iommu/intel/dmar.c                         |   5
-rw-r--r--  drivers/iommu/intel/iommu.c                        | 999
-rw-r--r--  drivers/iommu/intel/pasid.c                        | 173
-rw-r--r--  drivers/iommu/intel/pasid.h                        |   4
-rw-r--r--  drivers/iommu/intel/svm.c                          | 220
-rw-r--r--  drivers/iommu/iommu.c                              | 339
-rw-r--r--  drivers/iommu/iova.c                               |  78
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c                         |  32
-rw-r--r--  drivers/iommu/msm_iommu.c                          |  74
-rw-r--r--  drivers/iommu/mtk_iommu.c                          |  62
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c                       |  14
-rw-r--r--  drivers/iommu/omap-iommu.c                         |  14
-rw-r--r--  drivers/iommu/rockchip-iommu.c                     |  21
-rw-r--r--  drivers/iommu/s390-iommu.c                         |  14
-rw-r--r--  drivers/iommu/sprd-iommu.c                         |  18
-rw-r--r--  drivers/iommu/sun50i-iommu.c                       |  18
-rw-r--r--  drivers/iommu/tegra-gart.c                         |  24
-rw-r--r--  drivers/iommu/tegra-smmu.c                         |  20
-rw-r--r--  drivers/iommu/virtio-iommu.c                       |  14
-rw-r--r--  drivers/perf/arm_smmuv3_pmu.c                      |   4
-rw-r--r--  drivers/vdpa/vdpa_user/iova_domain.c               |  11
35 files changed, 588 insertions, 1899 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index d0d52c1d4aee..992cc285f2fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -133,7 +133,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
* or equal to the system's PAGE_SIZE, with a preference if
* both are equal.
*/
- pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+ pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
tdev->iommu.pgshift = PAGE_SHIFT;
} else {
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index bb95edf74415..1ab31074f5b3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -15,7 +15,6 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
@@ -117,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev);
+extern bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
bool cmd_line);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7bfe37e52e21..b4a798c7b347 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -671,7 +671,7 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
-void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -990,6 +990,7 @@ static bool copy_device_table(void)
get_order(dev_table_size));
if (old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
+ memunmap(old_devtb);
return false;
}
@@ -1020,6 +1021,7 @@ static bool copy_device_table(void)
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
(int_tab_len != DTE_INTTABLEN)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
+ memunmap(old_devtb);
return false;
}
@@ -1953,9 +1955,11 @@ static int __init amd_iommu_init_pci(void)
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
- if (ret)
- break;
-
+ if (ret) {
+ pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
+ iommu->index, ret);
+ goto out;
+ }
/* Need to setup range after PCI init */
iommu_set_cwwb_range(iommu);
}
@@ -1971,6 +1975,11 @@ static int __init amd_iommu_init_pci(void)
* active.
*/
ret = amd_iommu_init_api();
+ if (ret) {
+ pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
+ ret);
+ goto out;
+ }
init_device_table_dma();
@@ -1980,6 +1989,7 @@ static int __init amd_iommu_init_pci(void)
if (!ret)
print_iommu_info();
+out:
return ret;
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a18b549951bb..a1ada7bff44e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2221,8 +2221,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+bool amd_iommu_is_attach_deferred(struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2275,13 +2274,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
- .domain_free = amd_iommu_domain_free,
- .attach_dev = amd_iommu_attach_device,
- .detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .iotlb_sync_map = amd_iommu_iotlb_sync_map,
- .unmap = amd_iommu_unmap,
- .iova_to_phys = amd_iommu_iova_to_phys,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
@@ -2290,9 +2282,18 @@ const struct iommu_ops amd_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
- .flush_iotlb_all = amd_iommu_flush_iotlb_all,
- .iotlb_sync = amd_iommu_iotlb_sync,
.def_domain_type = amd_iommu_def_domain_type,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = amd_iommu_attach_device,
+ .detach_dev = amd_iommu_detach_device,
+ .map = amd_iommu_map,
+ .unmap = amd_iommu_unmap,
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
+ .iova_to_phys = amd_iommu_iova_to_phys,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
+ .free = amd_iommu_domain_free,
+ }
};
/*****************************************************************************
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 58da08cc3d01..e56b137ceabd 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -24,7 +24,6 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
-#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
struct pri_queue {
@@ -71,7 +70,6 @@ struct fault {
struct pasid_state *state;
struct mm_struct *mm;
u64 address;
- u16 devid;
u32 pasid;
u16 tag;
u16 finish;
@@ -125,6 +123,15 @@ static void free_device_state(struct device_state *dev_state)
{
struct iommu_group *group;
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
@@ -537,7 +544,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
ret = NOTIFY_DONE;
/* In kdump kernel pci dev is not initialized yet -> send INVALID */
- if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
+ if (amd_iommu_is_attach_deferred(&pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
@@ -850,15 +857,7 @@ void amd_iommu_free_device(struct pci_dev *pdev)
spin_unlock_irqrestore(&state_lock, flags);
- /* Get rid of any remaining pasid states */
- free_pasid_states(dev_state);
-
put_device_state(dev_state);
- /*
- * Wait until the last reference is dropped before freeing
- * the device state.
- */
- wait_event(dev_state->wq, !atomic_read(&dev_state->count));
free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@@ -955,8 +954,8 @@ out:
static void __exit amd_iommu_v2_exit(void)
{
- struct device_state *dev_state;
- int i;
+ struct device_state *dev_state, *next;
+ unsigned long flags;
if (!amd_iommu_v2_supported())
return;
@@ -969,18 +968,18 @@ static void __exit amd_iommu_v2_exit(void)
* The loop below might call flush_workqueue(), so call
* destroy_workqueue() after it
*/
- for (i = 0; i < MAX_DEVICES; ++i) {
- dev_state = get_device_state(i);
-
- if (dev_state == NULL)
- continue;
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry_safe(dev_state, next, &state_list, list) {
WARN_ON_ONCE(1);
put_device_state(dev_state);
- amd_iommu_free_device(dev_state->pdev);
+ list_del(&dev_state->list);
+ free_device_state(dev_state);
}
+ spin_unlock_irqrestore(&state_lock, flags);
+
destroy_workqueue(iommu_wq);
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 565ef5598811..decafb07ad08 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,
static const struct iommu_ops apple_dart_iommu_ops = {
.domain_alloc = apple_dart_domain_alloc,
- .domain_free = apple_dart_domain_free,
- .attach_dev = apple_dart_attach_dev,
- .detach_dev = apple_dart_detach_dev,
- .map_pages = apple_dart_map_pages,
- .unmap_pages = apple_dart_unmap_pages,
- .flush_iotlb_all = apple_dart_flush_iotlb_all,
- .iotlb_sync = apple_dart_iotlb_sync,
- .iotlb_sync_map = apple_dart_iotlb_sync_map,
- .iova_to_phys = apple_dart_iova_to_phys,
.probe_device = apple_dart_probe_device,
.release_device = apple_dart_release_device,
.device_group = apple_dart_device_group,
@@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.get_resv_regions = apple_dart_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = apple_dart_attach_dev,
+ .detach_dev = apple_dart_detach_dev,
+ .map_pages = apple_dart_map_pages,
+ .unmap_pages = apple_dart_unmap_pages,
+ .flush_iotlb_all = apple_dart_flush_iotlb_all,
+ .iotlb_sync = apple_dart_iotlb_sync,
+ .iotlb_sync_map = apple_dart_iotlb_sync_map,
+ .iova_to_phys = apple_dart_iova_to_phys,
+ .free = apple_dart_domain_free,
+ }
};
static irqreturn_t apple_dart_irq(int irq, void *dev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6dc6d8b6b368..627a3ed5ee8f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1558,6 +1558,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
+ cond_resched();
}
/*
@@ -2841,17 +2842,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
- .domain_free = arm_smmu_domain_free,
- .attach_dev = arm_smmu_attach_dev,
- .map_pages = arm_smmu_map_pages,
- .unmap_pages = arm_smmu_unmap_pages,
- .flush_iotlb_all = arm_smmu_flush_iotlb_all,
- .iotlb_sync = arm_smmu_iotlb_sync,
- .iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2865,6 +2858,16 @@ static struct iommu_ops arm_smmu_ops = {
.page_response = arm_smmu_page_response,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .free = arm_smmu_domain_free,
+ }
};
/* Probing and initialisation functions */
@@ -2911,32 +2914,20 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
return 0;
}
-static void arm_smmu_cmdq_free_bitmap(void *data)
-{
- unsigned long *bitmap = data;
- bitmap_free(bitmap);
-}
-
static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
{
- int ret = 0;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
- atomic_long_t *bitmap;
atomic_set(&cmdq->owner_prod, 0);
atomic_set(&cmdq->lock, 0);
- bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
- if (!bitmap) {
- dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
- ret = -ENOMEM;
- } else {
- cmdq->valid_map = bitmap;
- devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
- }
+ cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
+ GFP_KERNEL);
+ if (!cmdq->valid_map)
+ return -ENOMEM;
- return ret;
+ return 0;
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
@@ -2981,10 +2972,10 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
unsigned int i;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
void *strtab = smmu->strtab_cfg.strtab;
- cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
+ cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
+ sizeof(*cfg->l1_desc), GFP_KERNEL);
if (!cfg->l1_desc)
return -ENOMEM;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 4bc75c4ce402..568cce590ccc 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -807,7 +807,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* Request context fault interrupt. Do this last to avoid the
* handler seeing a half-initialised domain state.
*/
- irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ irq = smmu->irqs[cfg->irptndx];
if (smmu->impl && smmu->impl->context_fault)
context_fault = smmu->impl->context_fault;
@@ -858,7 +858,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
arm_smmu_write_context_bank(smmu, cfg->cbndx);
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
- irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ irq = smmu->irqs[cfg->irptndx];
devm_free_irq(smmu->dev, irq, domain);
}
@@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
- .domain_free = arm_smmu_domain_free,
- .attach_dev = arm_smmu_attach_dev,
- .map_pages = arm_smmu_map_pages,
- .unmap_pages = arm_smmu_unmap_pages,
- .flush_iotlb_all = arm_smmu_flush_iotlb_all,
- .iotlb_sync = arm_smmu_iotlb_sync,
- .iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
.device_group = arm_smmu_device_group,
- .enable_nesting = arm_smmu_enable_nesting,
- .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
+ .free = arm_smmu_domain_free,
+ }
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1951,8 +1953,8 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
return ret;
}
-static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
struct device *dev = smmu->dev;
struct acpi_iort_node *node =
@@ -1968,7 +1970,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return ret;
/* Ignore the configuration access interrupt */
- smmu->num_global_irqs = 1;
+ *global_irqs = 1;
+ *pmu_irqs = 0;
if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
@@ -1976,25 +1979,24 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return 0;
}
#else
-static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
return -ENODEV;
}
#endif
-static int arm_smmu_device_dt_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
const struct arm_smmu_match_data *data;
- struct device *dev = &pdev->dev;
+ struct device *dev = smmu->dev;
bool legacy_binding;
- if (of_property_read_u32(dev->of_node, "#global-interrupts",
- &smmu->num_global_irqs)) {
- dev_err(dev, "missing #global-interrupts property\n");
- return -ENODEV;
- }
+ if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
+ return dev_err_probe(dev, -ENODEV,
+ "missing #global-interrupts property\n");
+ *pmu_irqs = 0;
data = of_device_get_match_data(dev);
smmu->version = data->version;
@@ -2073,6 +2075,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ u32 global_irqs, pmu_irqs;
irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -2083,10 +2086,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->dev = dev;
if (dev->of_node)
- err = arm_smmu_device_dt_probe(pdev, smmu);
+ err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
else
- err = arm_smmu_device_acpi_probe(pdev, smmu);
-
+ err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
if (err)
return err;
@@ -2105,31 +2107,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (IS_ERR(smmu))
return PTR_ERR(smmu);
- num_irqs = 0;
- while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
- num_irqs++;
- if (num_irqs > smmu->num_global_irqs)
- smmu->num_context_irqs++;
- }
+ num_irqs = platform_irq_count(pdev);
- if (!smmu->num_context_irqs) {
- dev_err(dev, "found %d interrupts but expected at least %d\n",
- num_irqs, smmu->num_global_irqs + 1);
- return -ENODEV;
- }
+ smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
+ if (smmu->num_context_irqs <= 0)
+ return dev_err_probe(dev, -ENODEV,
+ "found %d interrupts but expected at least %d\n",
+ num_irqs, global_irqs + pmu_irqs + 1);
- smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
- GFP_KERNEL);
- if (!smmu->irqs) {
- dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
- return -ENOMEM;
- }
+ smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
+ sizeof(*smmu->irqs), GFP_KERNEL);
+ if (!smmu->irqs)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
+ smmu->num_context_irqs);
- for (i = 0; i < num_irqs; ++i) {
- int irq = platform_get_irq(pdev, i);
+ for (i = 0; i < smmu->num_context_irqs; i++) {
+ int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
if (irq < 0)
- return -ENODEV;
+ return irq;
smmu->irqs[i] = irq;
}
@@ -2165,17 +2161,18 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
else
global_fault = arm_smmu_global_fault;
- for (i = 0; i < smmu->num_global_irqs; ++i) {
- err = devm_request_irq(smmu->dev, smmu->irqs[i],
- global_fault,
- IRQF_SHARED,
- "arm-smmu global fault",
- smmu);
- if (err) {
- dev_err(dev, "failed to request global IRQ %d (%u)\n",
- i, smmu->irqs[i]);
- return err;
- }
+ for (i = 0; i < global_irqs; i++) {
+ int irq = platform_get_irq(pdev, i);
+
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
+ "arm-smmu global fault", smmu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request global IRQ %d (%u)\n",
+ i, irq);
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 432de2f742c3..2b9b42fb6f30 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -318,11 +318,10 @@ struct arm_smmu_device {
unsigned long pa_size;
unsigned long pgsize_bitmap;
- u32 num_global_irqs;
- u32 num_context_irqs;
+ int num_context_irqs;
+ int num_clks;
unsigned int *irqs;
struct clk_bulk_data *clks;
- int num_clks;
spinlock_t global_sync_lock;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index b91874cb6cf3..4c077c38fbd6 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
- .domain_free = qcom_iommu_domain_free,
- .attach_dev = qcom_iommu_attach_dev,
- .detach_dev = qcom_iommu_detach_dev,
- .map = qcom_iommu_map,
- .unmap = qcom_iommu_unmap,
- .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
- .iotlb_sync = qcom_iommu_iotlb_sync,
- .iova_to_phys = qcom_iommu_iova_to_phys,
.probe_device = qcom_iommu_probe_device,
.release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = qcom_iommu_attach_dev,
+ .detach_dev = qcom_iommu_detach_dev,
+ .map = qcom_iommu_map,
+ .unmap = qcom_iommu_unmap,
+ .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
+ .iotlb_sync = qcom_iommu_iotlb_sync,
+ .iova_to_phys = qcom_iommu_iova_to_phys,
+ .free = qcom_iommu_domain_free,
+ }
};
static int qcom_iommu_sec_ptbl_init(struct device *dev)
@@ -827,20 +829,20 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "Failed to populate iommu contexts\n");
- return ret;
+ goto err_pm_disable;
}
ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
dev_name(dev));
if (ret) {
dev_err(dev, "Failed to register iommu in sysfs\n");
- return ret;
+ goto err_pm_disable;
}
ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- return ret;
+ goto err_pm_disable;
}
bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
@@ -852,6 +854,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
}
return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ return ret;
}
static int qcom_iommu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d85d54f2b549..b22034975301 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/