-rw-r--r--  drivers/iommu/amd_iommu.c        | 100
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |   9
-rw-r--r--  drivers/iommu/arm-smmu-v3.c      |  38
-rw-r--r--  drivers/iommu/arm-smmu.c         |  39
-rw-r--r--  drivers/iommu/exynos-iommu.c     |  24
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c  |  22
-rw-r--r--  drivers/iommu/intel-iommu.c      |  68
-rw-r--r--  drivers/iommu/iommu.c            | 386
-rw-r--r--  drivers/iommu/iova.c             |   6
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c       |  59
-rw-r--r--  drivers/iommu/msm_iommu.c        |  34
-rw-r--r--  drivers/iommu/mtk_iommu.c        |  24
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c     |  50
-rw-r--r--  drivers/iommu/omap-iommu.c       |  99
-rw-r--r--  drivers/iommu/qcom_iommu.c       |  24
-rw-r--r--  drivers/iommu/rockchip-iommu.c   |  26
-rw-r--r--  drivers/iommu/s390-iommu.c       |  22
-rw-r--r--  drivers/iommu/tegra-gart.c       |  24
-rw-r--r--  drivers/iommu/tegra-smmu.c       |  31
-rw-r--r--  drivers/iommu/virtio-iommu.c     |  41
-rw-r--r--  include/linux/iommu.h            |  21
21 files changed, 537 insertions(+), 610 deletions(-)
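
Before the per-file hunks, a minimal standalone sketch of the interface change this diff makes. The callback names (probe_device, release_device, probe_finalize, def_domain_type, and the removed add_device/remove_device) are taken from the hunks below; everything else, including the struct name, is illustrative and is not the kernel's actual struct iommu_ops definition in include/linux/iommu.h.

/* Illustrative excerpt only -- not the real struct iommu_ops. */
struct device;
struct iommu_device;

struct iommu_ops_excerpt {
	/* Removed: drivers used to set up groups and dma_ops themselves. */
	int  (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);

	/* Added: the driver only reports its IOMMU instance (or an ERR_PTR). */
	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);

	/* Called after the core has set up group and default domain. */
	void (*probe_finalize)(struct device *dev);

	/* Per-device default-domain preference; 0 means no preference. */
	int  (*def_domain_type)(struct device *dev);
};

Each driver hunk below replaces its .add_device/.remove_device assignments with .probe_device/.release_device accordingly.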
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2883ac389abb..1b36c40d0712 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -349,7 +349,7 @@ static bool check_device(struct device *dev)
{
int devid;
- if (!dev || !dev->dma_mask)
+ if (!dev)
return false;
devid = get_device_id(dev);
@@ -366,21 +366,9 @@ static bool check_device(struct device *dev)
return true;
}
-static void init_iommu_group(struct device *dev)
-{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return;
-
- iommu_group_put(group);
-}
-
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
int devid;
if (dev->archdata.iommu)
@@ -390,8 +378,6 @@ static int iommu_init_device(struct device *dev)
if (devid < 0)
return devid;
- iommu = amd_iommu_rlookup_table[devid];
-
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
@@ -414,8 +400,6 @@ static int iommu_init_device(struct device *dev)
dev->archdata.iommu = dev_data;
- iommu_device_link(&iommu->iommu, dev);
-
return 0;
}
@@ -433,18 +417,15 @@ static void iommu_ignore_device(struct device *dev)
setup_aliases(dev);
}
-static void iommu_uninit_device(struct device *dev)
+static void amd_iommu_uninit_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
- iommu = amd_iommu_rlookup_table[devid];
-
dev_data = search_dev_data(devid);
if (!dev_data)
return;
@@ -452,13 +433,6 @@ static void iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- iommu_device_unlink(&iommu->iommu, dev);
-
- iommu_group_remove_device(dev);
-
- /* Remove dma-ops */
- dev->dma_ops = NULL;
-
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
@@ -2119,8 +2093,8 @@ out_err:
static int attach_device(struct device *dev,
struct protection_domain *domain)
{
- struct pci_dev *pdev;
struct iommu_dev_data *dev_data;
+ struct pci_dev *pdev;
unsigned long flags;
int ret;
@@ -2139,8 +2113,10 @@ static int attach_device(struct device *dev,
pdev = to_pci_dev(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
+ struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
+
ret = -EINVAL;
- if (!dev_data->passthrough)
+ if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
goto out;
if (dev_data->iommu_v2) {
@@ -2222,54 +2198,50 @@ out:
spin_unlock_irqrestore(&domain->lock, flags);
}
-static int amd_iommu_add_device(struct device *dev)
+static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
- struct iommu_dev_data *dev_data;
- struct iommu_domain *domain;
+ struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
int ret, devid;
- if (!check_device(dev) || get_dev_data(dev))
- return 0;
+ if (!check_device(dev))
+ return ERR_PTR(-ENODEV);
devid = get_device_id(dev);
if (devid < 0)
- return devid;
+ return ERR_PTR(devid);
iommu = amd_iommu_rlookup_table[devid];
+ if (get_dev_data(dev))
+ return &iommu->iommu;
+
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
-
+ iommu_dev = ERR_PTR(ret);
iommu_ignore_device(dev);
- dev->dma_ops = NULL;
- goto out;
+ } else {
+ iommu_dev = &iommu->iommu;
}
- init_iommu_group(dev);
- dev_data = get_dev_data(dev);
+ iommu_completion_wait(iommu);
- BUG_ON(!dev_data);
+ return iommu_dev;
+}
- if (dev_data->iommu_v2)
- iommu_request_dm_for_dev(dev);
+static void amd_iommu_probe_finalize(struct device *dev)
+{
+ struct iommu_domain *domain;
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_IDENTITY)
- dev_data->passthrough = true;
- else if (domain->type == IOMMU_DOMAIN_DMA)
+ if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
-
-out:
- iommu_completion_wait(iommu);
-
- return 0;
}
-static void amd_iommu_remove_device(struct device *dev)
+static void amd_iommu_release_device(struct device *dev)
{
struct amd_iommu *iommu;
int devid;
@@ -2283,7 +2255,7 @@ static void amd_iommu_remove_device(struct device *dev)
iommu = amd_iommu_rlookup_table[devid];
- iommu_uninit_device(dev);
+ amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
}
@@ -2758,6 +2730,20 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
amd_iommu_flush_iotlb_all(domain);
}
+static int amd_iommu_def_domain_type(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = get_dev_data(dev);
+ if (!dev_data)
+ return 0;
+
+ if (dev_data->iommu_v2)
+ return IOMMU_DOMAIN_IDENTITY;
+
+ return 0;
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -2767,8 +2753,9 @@ const struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
- .add_device = amd_iommu_add_device,
- .remove_device = amd_iommu_remove_device,
+ .probe_device = amd_iommu_probe_device,
+ .release_device = amd_iommu_release_device,
+ .probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
@@ -2777,6 +2764,7 @@ const struct iommu_ops amd_iommu_ops = {
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
+ .def_domain_type = amd_iommu_def_domain_type,
};
/*****************************************************************************
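
The new amd_iommu_def_domain_type() replaces the old iommu_request_dm_for_dev() call for IOMMUv2-capable devices. Below is a standalone sketch of how such a callback is meant to interact with the core's fallback, mirroring iommu_get_def_domain_type() in the iommu.c hunks further down; the enum names and numeric values here are placeholders, not the kernel's IOMMU_DOMAIN_* constants.

#include <stdio.h>

enum example_domain_type {
	EXAMPLE_DOMAIN_DMA      = 1,	/* placeholder values, not the kernel's */
	EXAMPLE_DOMAIN_IDENTITY = 2,
};

static enum example_domain_type global_default = EXAMPLE_DOMAIN_DMA;

/* Stand-in for amd_iommu_def_domain_type(): identity for IOMMUv2-capable
 * devices, 0 ("no preference") for everything else. */
static int example_def_domain_type(int device_is_iommu_v2)
{
	return device_is_iommu_v2 ? EXAMPLE_DOMAIN_IDENTITY : 0;
}

/* Mirrors the shape of iommu_get_def_domain_type(): the driver's
 * preference wins, otherwise fall back to the global default. */
static int resolve_domain_type(int device_is_iommu_v2)
{
	int type = example_def_domain_type(device_is_iommu_v2);

	return type ? type : (int)global_default;
}

int main(void)
{
	printf("IOMMUv2 device -> %d (identity)\n", resolve_domain_type(1));
	printf("regular device -> %d (global default)\n", resolve_domain_type(0));
	return 0;
}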
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 7a8fdec138bd..30a5d412255a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -395,10 +395,10 @@
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...) \
- do { \
- if (amd_iommu_dump) \
- printk(KERN_INFO "AMD-Vi: " format, ## arg); \
+#define DUMP_printk(format, arg...) \
+ do { \
+ if (amd_iommu_dump) \
+ pr_info("AMD-Vi: " format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -645,7 +645,6 @@ struct iommu_dev_data {
struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Device is identity mapped */
struct {
bool enabled;
int qdep;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 82508730feb7..42e1ee7e5197 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2914,27 +2914,26 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
static struct iommu_ops arm_smmu_ops;
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
int i, ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct iommu_group *group;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
master->dev = dev;
master->smmu = smmu;
@@ -2975,30 +2974,15 @@ static int arm_smmu_add_device(struct device *dev)
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
- ret = iommu_device_link(&smmu->iommu, dev);
- if (ret)
- goto err_disable_pasid;
+ return &smmu->iommu;
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto err_unlink;
- }
-
- iommu_group_put(group);
- return 0;
-
-err_unlink:
- iommu_device_unlink(&smmu->iommu, dev);
-err_disable_pasid:
- arm_smmu_disable_pasid(master);
err_free_master:
kfree(master);
dev_iommu_priv_set(dev, NULL);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master *master;
@@ -3010,8 +2994,6 @@ static void arm_smmu_remove_device(struct device *dev)
master = dev_iommu_priv_get(dev);
smmu = master->smmu;
arm_smmu_detach_dev(master);
- iommu_group_remove_device(dev);
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
@@ -3138,8 +3120,8 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a6a5796e9c41..e622f4e33379 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -220,7 +220,7 @@ static int arm_smmu_register_legacy_master(struct device *dev,
* With the legacy DT binding in play, we have no guarantees about
* probe order, but then we're also not doing default domains, so we can
* delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
+ * and that way ensure that no probe_device() calls get missed.
*/
static int arm_smmu_legacy_bus_init(void)
{
@@ -1062,7 +1062,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
- struct iommu_group *group;
int i, idx, ret;
mutex_lock(&smmu->stream_map_mutex);
@@ -1090,18 +1089,9 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
cfg->smendx[i] = (s16)idx;
}
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto out_err;
- }
- iommu_group_put(group);
-
/* It worked! Now, poke the actual hardware */
- for_each_cfg_sme(cfg, fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx)
arm_smmu_write_sme(smmu, idx);
- smmu->s2crs[idx].group = group;
- }
mutex_unlock(&smmu->stream_map_mutex);
return 0;
@@ -1172,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
- * domains between of_xlate() and add_device() - we have no way to cope
+ * domains between of_xlate() and probe_device() - we have no way to cope
* with that, so until ARM gets converted to rely on groups and default
* domains, just say no (but more politely than by dereferencing NULL).
* This should be at least a WARN_ON once that's sorted.
@@ -1382,7 +1372,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
return dev ? dev_get_drvdata(dev) : NULL;
}
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
struct arm_smmu_device *smmu = NULL;
struct arm_smmu_master_cfg *cfg;
@@ -1403,7 +1393,7 @@ static int arm_smmu_add_device(struct device *dev)
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
ret = -EINVAL;
@@ -1444,21 +1434,19 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_cfg_free;
- iommu_device_link(&smmu->iommu, dev);
-
device_link_add(dev, smmu->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
- return 0;
+ return &smmu->iommu;
out_cfg_free:
kfree(cfg);
out_free:
iommu_fwspec_free(dev);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg;
@@ -1475,13 +1463,11 @@ static void arm_smmu_remove_device(struct device *dev)
if (ret < 0)
return;
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(cfg, fwspec);
arm_smmu_rpm_put(smmu);
dev_iommu_priv_set(dev, NULL);
- iommu_group_remove_device(dev);
kfree(cfg);
iommu_fwspec_free(dev);
}
@@ -1512,6 +1498,11 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
else
group = generic_device_group(dev);
+ /* Remember group for faster lookups */
+ if (!IS_ERR(group))
+ for_each_cfg_sme(cfg, fwspec, i, idx)
+ smmu->s2crs[idx].group = group;
+
return group;
}
@@ -1628,8 +1619,8 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 186ff5cc975c..60c8a56e4a3f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1235,19 +1235,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
-static int exynos_iommu_add_device(struct device *dev)
+static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
- struct iommu_group *group;
if (!has_sysmmu(dev))
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group))
- return PTR_ERR(group);
+ return ERR_PTR(-ENODEV);
list_for_each_entry(data, &owner->controllers, owner_node) {
/*
@@ -1259,12 +1253,15 @@ static int exynos_iommu_add_device(struct device *dev)
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME);
}
- iommu_group_put(group);
- return 0;
+ /* There is always at least one entry, see exynos_iommu_of_xlate() */
+ data = list_first_entry(&owner->controllers,
+ struct sysmmu_drvdata, owner_node);
+
+ return &data->iommu;
}
-static void exynos_iommu_remove_device(struct device *dev)
+static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
@@ -1282,7 +1279,6 @@ static void exynos_iommu_remove_device(struct device *dev)
iommu_group_put(group);
}
}
- iommu_group_remove_device(dev);
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1331,8 +1327,8 @@ static const struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
- .add_device = exynos_iommu_add_device,
- .remove_device = exynos_iommu_remove_device,
+ .probe_device = exynos_iommu_probe_device,
+ .release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
};
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 06828e2698d5..928d37771ece 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -1016,25 +1016,13 @@ static struct iommu_group *fsl_pamu_device_group(struct device *dev)
return group;
}
-static int fsl_pamu_add_device(struct device *dev)
+static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
-
- iommu_device_link(&pamu_iommu, dev);
-
- return 0;
+ return &pamu_iommu;
}
-static void fsl_pamu_remove_device(struct device *dev)
+static void fsl_pamu_release_device(struct device *dev)
{
- iommu_device_unlink(&pamu_iommu, dev);
- iommu_group_remove_device(dev);
}
static const struct iommu_ops fsl_pamu_ops = {
@@ -1048,8 +1036,8 @@ static const struct iommu_ops fsl_pamu_ops = {
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
- .add_device = fsl_pamu_add_device,
- .remove_device = fsl_pamu_remove_device,
+ .probe_device = fsl_pamu_probe_device,
+ .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0182cff2c7ac..34e08fa2ce3a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5781,78 +5781,27 @@ static bool intel_iommu_capable(enum iommu_cap cap)
return false;
}
-static int intel_iommu_add_device(struct device *dev)
+static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
struct intel_iommu *iommu;
- struct iommu_group *group;
u8 bus, devfn;
- int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
- return -ENODEV;
-
- iommu_device_link(&iommu->iommu, dev);
+ return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto unlink;
- }
-
- iommu_group_put(group);
-
- domain = iommu_get_domain_for_dev(dev);
- dmar_domain = to_dmar_domain(domain);
- if (domain->type == IOMMU_DOMAIN_DMA) {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
- ret = iommu_request_dm_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- domain_add_dev_info(si_domain, dev);
- dev_info(dev,
- "Device uses a private identity domain.\n");
- }
- }
- } else {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- if (!get_private_domain_for_dev(dev)) {
- dev_warn(dev,
- "Failed to get a private domain.\n");
- ret = -ENOMEM;
- goto unlink;
- }
-
- dev_info(dev,
- "Device uses a private dma domain.\n");
- }
- }
- }
-
if (device_needs_bounce(dev)) {
dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
set_dma_ops(dev, &bounce_dma_ops);
}
- return 0;
-
-unlink:
- iommu_device_unlink(&iommu->iommu, dev);
- return ret;
+ return &iommu->iommu;
}
-static void intel_iommu_remove_device(struct device *dev)
+static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -5863,10 +5812,6 @@ static void intel_iommu_remove_device(struct device *dev)
dmar_remove_one_dev_info(dev);
- iommu_group_remove_device(dev);
-
- iommu_device_unlink(&iommu->iommu, dev);
-
if (device_needs_bounce(dev))
set_dma_ops(dev, NULL);
}
@@ -6198,8 +6143,8 @@ const struct iommu_ops intel_iommu_ops = {
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
+ .probe_device = intel_iommu_probe_device,
+ .release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
@@ -6209,6 +6154,7 @@ const struct iommu_ops intel_iommu_ops = {
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
+ .def_domain_type = device_def_domain_type,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
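
The core changes below centralize the group and default-domain setup that each driver previously duplicated in its add_device() callback. A rough standalone sketch of the call ordering the reworked iommu_probe_device() ends up enforcing follows; every function here is a stub that merely names the real one it stands in for, and error handling, locking, and the bus-probe-time batching (__iommu_probe_device() with a group_list) are omitted.

#include <stdio.h>

static void step(const char *what) { printf("-> %s\n", what); }

int main(void)
{
	/* 1. Driver reports its IOMMU instance (or an ERR_PTR). */
	step("ops->probe_device()");

	/* 2. Core sets up the group and links the device. */
	step("iommu_group_get_for_dev()");
	step("iommu_device_link()");

	/* 3. Core allocates and attaches the default domain. */
	step("iommu_alloc_default_domain()");
	step("__iommu_attach_device(group->default_domain)");
	step("iommu_create_device_direct_mappings()");

	/* 4. Only then does the driver configure dma_ops. */
	step("ops->probe_finalize()");

	return 0;
}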
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 1faa08c8bbb4..629d209b8e88 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -44,6 +44,7 @@ struct iommu_group {
int id;
struct iommu_domain *default_domain;
struct iommu_domain *domain;
+ struct list_head entry;
};
struct group_device {
@@ -79,6 +80,19 @@ static bool iommu_cmd_line_dma_api(void)
return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}
+static int iommu_alloc_default_domain(struct device *dev);
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+ unsigned type);
+static int __iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
+static int __iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static void __iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev);
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -175,57 +189,118 @@ static void dev_iommu_free(struct device *dev)
dev->iommu = NULL;
}
-int iommu_probe_device(struct device *dev)
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_device *iommu_dev;
+ struct iommu_group *group;
int ret;
- WARN_ON(dev->iommu_group);
if (!ops)
- return -EINVAL;
+ return -ENODEV;
if (!dev_iommu_get(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
- goto err_free_dev_param;
+ goto err_free;
}
- ret = ops->add_device(dev);
- if (ret)
- goto err_module_put;
+ iommu_dev = ops->probe_device(dev);
+ if (IS_ERR(iommu_dev)) {
+ ret = PTR_ERR(iommu_dev);
+ goto out_module_put;
+ }
+
+ dev->iommu->iommu_dev = iommu_dev;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto out_release;
+ }
+ iommu_group_put(group);
+
+ if (group_list && !group->default_domain && list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
+
+ iommu_device_link(iommu_dev, dev);
return 0;
-err_module_put:
+out_release:
+ ops->release_device(dev);
+
+out_module_put:
module_put(ops->owner);
-err_free_dev_param:
+
+err_free:
dev_iommu_free(dev);
+
return ret;
}
-void iommu_release_device(struct device *dev)
+int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+ int ret;
- if (dev->iommu_group)
- ops->remove_device(dev);
+ ret = __iommu_probe_device(dev, NULL);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Try to allocate a default domain - needs support from the
+ * IOMMU driver. There are still some drivers which don't
+ * support default domains, so the return value is not yet
+ * checked.
+ */
+ iommu_alloc_default_domain(dev);
+
+ group = iommu_group_get(dev);
+ if (!group)
+ goto err_release;
+
+ if (group->default_domain)
+ ret = __iommu_attach_device(group->default_domain, dev);
+
+ iommu_create_device_direct_mappings(group, dev);
+
+ iommu_group_put(group);
+
+ if (ret)
+ goto err_release;
+
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
+
+ return 0;
+
+err_release:
+ iommu_release_device(dev);
+
+err_out:
+ return ret;
- if (dev->iommu) {
- module_put(ops->owner);
- dev_iommu_free(dev);
- }
}
-static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
- unsigned type);
-static int __iommu_attach_device(struct iommu_domain *domain,
- struct device *dev);
-static int __iommu_attach_group(struct iommu_domain *domain,
- struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
- struct iommu_group *group);
+void iommu_release_device(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!dev->iommu)
+ return;
+
+ iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ iommu_group_remove_device(dev);
+
+ ops->release_device(dev);
+
+ module_put(ops->owner);
+ dev_iommu_free(dev);
+}
static int __init iommu_set_def_domain_type(char *str)
{
@@ -497,6 +572,7 @@ struct iommu_group *iommu_group_alloc(void)
group->kobj.kset = iommu_group_kset;
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
+ INIT_LIST_HEAD(&group->entry);
BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
@@ -638,8 +714,8 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
-static int iommu_group_create_direct_mappings(struct iommu_group *group,
- struct device *dev)
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
struct iommu_resv_region *entry;
@@ -752,8 +828,6 @@ rename:
dev->iommu_group = group;
- iommu_group_create_direct_mappings(group, dev);
-
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
@@ -1371,6 +1445,65 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
+static int iommu_get_def_domain_type(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ unsigned int type = 0;
+
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+
+ return (type == 0) ? iommu_def_domain_type : type;
+}
+
+static int iommu_group_alloc_default_domain(struct bus_type *bus,
+ struct iommu_group *group,
+ unsigned int type)
+{
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(bus, type);
+ if (!dom && type != IOMMU_DOMAIN_DMA) {
+ dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
+ if (dom)
+ pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
+ type, group->name);
+ }
+
+ if (!dom)
+ return -ENOMEM;
+
+ group->default_domain = dom;
+ if (!group->domain)
group->domain = dom;