-rw-r--r--  drivers/iommu/intel/iommu.c                        19
-rw-r--r--  drivers/iommu/iommu-priv.h                         30
-rw-r--r--  drivers/iommu/iommu.c                              81
-rw-r--r--  drivers/iommu/iommufd/device.c                    731
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c              112
-rw-r--r--  drivers/iommu/iommufd/io_pagetable.c               36
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h            84
-rw-r--r--  drivers/iommu/iommufd/iommufd_test.h               19
-rw-r--r--  drivers/iommu/iommufd/main.c                       35
-rw-r--r--  drivers/iommu/iommufd/selftest.c                  213
-rw-r--r--  drivers/vfio/iommufd.c                             11
-rw-r--r--  drivers/vfio/vfio_main.c                            4
-rw-r--r--  include/linux/iommu.h                              16
-rw-r--r--  include/linux/iommufd.h                             2
-rw-r--r--  include/uapi/linux/iommufd.h                       97
-rw-r--r--  include/uapi/linux/vfio.h                           6
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c           130
-rw-r--r--  tools/testing/selftests/iommu/iommufd_fail_nth.c   71
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h     144
19 files changed, 1486 insertions, 355 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c8c5cdc36cf..9e6f78830ece 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
+#include <uapi/linux/iommufd.h>
#include "iommu.h"
#include "../dma-iommu.h"
@@ -4732,8 +4733,26 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
}
+static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct iommu_hw_info_vtd *vtd;
+
+ vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
+ if (!vtd)
+ return ERR_PTR(-ENOMEM);
+
+ vtd->cap_reg = iommu->cap;
+ vtd->ecap_reg = iommu->ecap;
+ *length = sizeof(*vtd);
+ *type = IOMMU_HW_INFO_TYPE_INTEL_VTD;
+ return vtd;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
+ .hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
.probe_device = intel_iommu_probe_device,
.probe_finalize = intel_iommu_probe_finalize,
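
The new hw_info op simply snapshots the raw VT-d CAP and ECAP registers and hands them to user space to interpret. A minimal sketch, assuming a consumer has already fetched the blob, of how the two fields filled in above might be decoded; the struct here only mirrors those two fields and the capability bit is a placeholder, not a real VT-d register definition:

    #include <inttypes.h>
    #include <stdio.h>

    /* Mirrors only the two fields the driver fills in above. */
    struct example_hw_info_vtd {
            uint64_t cap_reg;
            uint64_t ecap_reg;
    };

    /* Placeholder bit, not taken from the VT-d specification. */
    #define EXAMPLE_ECAP_FEATURE (1ULL << 3)

    static void example_decode_vtd(const struct example_hw_info_vtd *vtd)
    {
            printf("CAP  = 0x%016" PRIx64 "\n", vtd->cap_reg);
            printf("ECAP = 0x%016" PRIx64 "\n", vtd->ecap_reg);
            if (vtd->ecap_reg & EXAMPLE_ECAP_FEATURE)
                    printf("example feature reported by the IOMMU\n");
    }
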
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
new file mode 100644
index 000000000000..2024a2313348
--- /dev/null
+++ b/drivers/iommu/iommu-priv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef __LINUX_IOMMU_PRIV_H
+#define __LINUX_IOMMU_PRIV_H
+
+#include <linux/iommu.h>
+
+static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+{
+ /*
+ * Assume that valid ops must be installed if iommu_probe_device()
+ * has succeeded. The device ops are essentially for internal use
+ * within the IOMMU subsystem itself, so we should be able to trust
+ * ourselves not to misuse the helper.
+ */
+ return dev->iommu->iommu_dev->ops;
+}
+
+int iommu_group_replace_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain);
+
+int iommu_device_register_bus(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct bus_type *bus,
+ struct notifier_block *nb);
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+ struct bus_type *bus,
+ struct notifier_block *nb);
+
+#endif /* __LINUX_IOMMU_PRIV_H */
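
A hypothetical internal caller (not part of this patch) showing what dev_iommu_ops() is for: code inside drivers/iommu/ can reach the per-device ops installed by iommu_probe_device(), for instance to invoke the hw_info callback wired up in the Intel driver above, without going through bus->iommu_ops. The helper name is illustrative:

    #include <linux/err.h>
    #include <linux/iommu.h>
    #include "iommu-priv.h"

    static void *example_query_hw_info(struct device *dev, u32 *length, u32 *type)
    {
            const struct iommu_ops *ops = dev_iommu_ops(dev);

            if (!ops->hw_info)
                    return ERR_PTR(-EOPNOTSUPP);
            return ops->hw_info(dev, length, type);
    }
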
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index caaf563d38ae..c9c370de9d3d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -34,8 +34,9 @@
#include <linux/msi.h>
#include "dma-iommu.h"
+#include "iommu-priv.h"
#include "iommu-sva.h"
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -287,6 +289,48 @@ void iommu_device_unregister(struct iommu_device *iommu)
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
+#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
+void iommu_device_unregister_bus(struct iommu_device *iommu,
+ struct bus_type *bus,
+ struct notifier_block *nb)
+{
+ bus_unregister_notifier(bus, nb);
+ iommu_device_unregister(iommu);
+}
+EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
+
+/*
+ * Register an iommu driver against a single bus. This is only used by iommufd
+ * selftest to create a mock iommu driver. The caller must provide
+ * some memory to hold a notifier_block.
+ */
+int iommu_device_register_bus(struct iommu_device *iommu,
+ const struct iommu_ops *ops, struct bus_type *bus,
+ struct notifier_block *nb)
+{
+ int err;
+
+ iommu->ops = ops;
+ nb->notifier_call = iommu_bus_notifier;
+ err = bus_register_notifier(bus, nb);
+ if (err)
+ return err;
+
+ spin_lock(&iommu_device_lock);
+ list_add_tail(&iommu->list, &iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+
+ bus->iommu_ops = ops;
+ err = bus_iommu_probe(bus);
+ if (err) {
+ iommu_device_unregister_bus(iommu, bus, nb);
+ return err;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+#endif
+
static struct dev_iommu *dev_iommu_get(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
@@ -2114,6 +2158,32 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
+/**
+ * iommu_group_replace_domain - replace the domain that a group is attached to
+ * @group: IOMMU group that will be attached to the new domain
+ * @new_domain: new IOMMU domain to replace with
+ *
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * this function behaves just like iommu_attach_group().
+ */
+int iommu_group_replace_domain(struct iommu_group *group,
+ struct iommu_domain *new_domain)
+{
+ int ret;
+
+ if (!new_domain)
+ return -EINVAL;
+
+ mutex_lock(&group->mutex);
+ ret = __iommu_group_set_domain(group, new_domain);
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL);
+
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
@@ -2642,6 +2712,14 @@ int iommu_set_pgtable_quirks(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
+/**
+ * iommu_get_resv_regions - get reserved regions
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list for device
+ *
+ * This returns a list of reserved IOVA regions specific to this device.
+ * A domain user should not map IOVA in these ranges.
+ */
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev_iommu_ops(dev);
@@ -2649,9 +2727,10 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
if (ops->get_resv_regions)
ops->get_resv_regions(dev, list);
}
+EXPORT_SYMBOL_GPL(iommu_get_resv_regions);
/**
- * iommu_put_resv_regions - release resered regions
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
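
The bus-scoped registration helpers added above exist only for the CONFIG_IOMMUFD_TEST mock driver. A rough sketch of how such a driver might pair them over its lifetime; the mock_* names and the bus pointer are illustrative and not taken from drivers/iommu/iommufd/selftest.c:

    #include <linux/device.h>
    #include <linux/iommu.h>
    #include <linux/notifier.h>
    #include "iommu-priv.h"

    static struct iommu_device mock_iommu_dev;
    static struct notifier_block mock_nb;

    static int mock_bus_init(struct bus_type *mock_bus,
                             const struct iommu_ops *mock_ops)
    {
            /* Installs the ops on the bus, registers the notifier, probes devices */
            return iommu_device_register_bus(&mock_iommu_dev, mock_ops,
                                             mock_bus, &mock_nb);
    }

    static void mock_bus_exit(struct bus_type *mock_bus)
    {
            iommu_device_unregister_bus(&mock_iommu_dev, mock_bus, &mock_nb);
    }
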
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 1d49aad48c67..ce78c3671539 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -4,6 +4,8 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <uapi/linux/iommufd.h>
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -15,13 +17,127 @@ MODULE_PARM_DESC(
"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
"the MSI interrupt window. Enabling this is a security weakness.");
+static void iommufd_group_release(struct kref *kref)
+{
+ struct iommufd_group *igroup =
+ container_of(kref, struct iommufd_group, ref);
+
+ WARN_ON(igroup->hwpt || !list_empty(&igroup->device_list));
+
+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
+ NULL, GFP_KERNEL);
+ iommu_group_put(igroup->group);
+ mutex_destroy(&igroup->lock);
+ kfree(igroup);
+}
+
+static void iommufd_put_group(struct iommufd_group *group)
+{
+ kref_put(&group->ref, iommufd_group_release);
+}
+
+static bool iommufd_group_try_get(struct iommufd_group *igroup,
+ struct iommu_group *group)
+{
+ if (!igroup)
+ return false;
+ /*
+ * group IDs cannot be re-used until the group is put back, which cannot
+ * have happened while an igroup pointer is still found under the xa_lock.
+ */
+ if (WARN_ON(igroup->group != group))
+ return false;
+ return kref_get_unless_zero(&igroup->ref);
+}
+
+/*
+ * iommufd needs to store some more data for each iommu_group, we keep a
+ * parallel xarray indexed by iommu_group id to hold this instead of putting it
+ * in the core structure. To keep things simple the iommufd_group memory is
+ * unique within the iommufd_ctx. This makes it easy to check there are no
+ * memory leaks.
+ */
+static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
+ struct device *dev)
+{
+ struct iommufd_group *new_igroup;
+ struct iommufd_group *cur_igroup;
+ struct iommufd_group *igroup;
+ struct iommu_group *group;
+ unsigned int id;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ id = iommu_group_id(group);
+
+ xa_lock(&ictx->groups);
+ igroup = xa_load(&ictx->groups, id);
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommu_group_put(group);
+ return igroup;
+ }
+ xa_unlock(&ictx->groups);
+
+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
+ if (!new_igroup) {
+ iommu_group_put(group);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&new_igroup->ref);
+ mutex_init(&new_igroup->lock);
+ INIT_LIST_HEAD(&new_igroup->device_list);
+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
+ /* group reference moves into new_igroup */
+ new_igroup->group = group;
+
+ /*
+ * The ictx is not additionally refcounted here because all objects using
+ * an igroup must put it before their destroy completes.
+ */
+ new_igroup->ictx = ictx;
+
+ /*
+ * We dropped the lock so igroup is invalid. NULL is a safe and likely
+ * value to assume for the xa_cmpxchg algorithm.
+ */
+ cur_igroup = NULL;
+ xa_lock(&ictx->groups);
+ while (true) {
+ igroup = __xa_cmpxchg(&ictx->groups, id, cur_igroup, new_igroup,
+ GFP_KERNEL);
+ if (xa_is_err(igroup)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return ERR_PTR(xa_err(igroup));
+ }
+
+ /* new_igroup was successfully installed */
+ if (cur_igroup == igroup) {
+ xa_unlock(&ictx->groups);
+ return new_igroup;
+ }
+
+ /* Check again if the current group is any good */
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return igroup;
+ }
+ cur_igroup = igroup;
+ }
+}
+
void iommufd_device_destroy(struct iommufd_object *obj)
{
struct iommufd_device *idev =
container_of(obj, struct iommufd_device, obj);
iommu_device_release_dma_owner(idev->dev);
- iommu_group_put(idev->group);
+ iommufd_put_group(idev->igroup);
if (!iommufd_selftest_is_mock_dev(idev->dev))
iommufd_ctx_put(idev->ictx);
}
@@ -46,7 +162,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id)
{
struct iommufd_device *idev;
- struct iommu_group *group;
+ struct iommufd_group *igroup;
int rc;
/*
@@ -56,9 +172,29 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
return ERR_PTR(-EINVAL);
- group = iommu_group_get(dev);
- if (!group)
- return ERR_PTR(-ENODEV);
+ igroup = iommufd_get_group(ictx, dev);
+ if (IS_ERR(igroup))
+ return ERR_CAST(igroup);
+
+ /*
+ * For historical compat with VFIO the insecure interrupt path is
+ * allowed if the module parameter is set. Secure/Isolated means that a
+ * MemWr operation from the device (eg a simple DMA) cannot trigger an
+ * interrupt outside this iommufd context.
+ */
+ if (!iommufd_selftest_is_mock_dev(dev) &&
+ !iommu_group_has_isolated_msi(igroup->group)) {
+ if (!allow_unsafe_interrupts) {
+ rc = -EPERM;
+ goto out_group_put;
+ }
+
+ dev_warn(
+ dev,
+ "MSI interrupts are not secure, they cannot be isolated by the platform. "
+ "Check that platform features like interrupt remapping are enabled. "
+ "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
+ }
rc = iommu_device_claim_dma_owner(dev, ictx);
if (rc)
@@ -77,8 +213,8 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
/* The calling driver is a user until iommufd_device_unbind() */
refcount_inc(&idev->obj.users);
- /* group refcount moves into iommufd_device */
- idev->group = group;
+ /* igroup refcount moves into iommufd_device */
+ idev->igroup = igroup;
/*
* If the caller fails after this success it must call
@@ -93,7 +229,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
out_release_owner:
iommu_device_release_dma_owner(dev);
out_group_put:
- iommu_group_put(group);
+ iommufd_put_group(igroup);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
@@ -118,7 +254,8 @@ bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
xa_lock(&ictx->objects);
xa_for_each(&ictx->objects, index, obj) {
if (obj->type == IOMMUFD_OBJ_DEVICE &&
- container_of(obj, struct iommufd_device, obj)->group == group) {
+ container_of(obj, struct iommufd_device, obj)
+ ->igroup->group == group) {
xa_unlock(&ictx->objects);
return true;
}
@@ -155,10 +292,10 @@ u32 iommufd_device_to_id(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);
-static int iommufd_device_setup_msi(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- phys_addr_t sw_msi_start)
+static int iommufd_group_setup_msi(struct iommufd_group *igroup,
+ struct iommufd_hw_pagetable *hwpt)
{
+ phys_addr_t sw_msi_start = igroup->sw_msi_start;
int rc;
/*
@@ -185,128 +322,192 @@ static int iommufd_device_setup_msi(struct iommufd_device *idev,
*/
hwpt->msi_cookie = true;
}
-
- /*
- * For historical compat with VFIO the insecure interrupt path is
- * allowed if the module parameter is set. Insecure means that a MemWr
- * operation from the device (eg a simple DMA) cannot trigger an
- * interrupt outside this iommufd context.
- */
- if (!iommufd_selftest_is_mock_dev(idev->dev) &&
- !iommu_group_has_isolated_msi(idev->group)) {
- if (!allow_unsafe_interrupts)
- return -EPERM;
-
- dev_warn(
- idev->dev,
- "MSI interrupts are not secure, they cannot be isolated by the platform. "
- "Check that platform features like interrupt remapping are enabled. "
- "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
- }
return 0;
}
-static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
- struct iommu_group *group)
-{
- struct iommufd_device *cur_dev;
-
- lockdep_assert_held(&hwpt->devices_lock);
-
- list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
- if (cur_dev->group == group)
- return true;
- return false;
-}
-
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
- phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
int rc;
- lockdep_assert_held(&hwpt->devices_lock);
-
- if (WARN_ON(idev->hwpt))
- return -EINVAL;
+ mutex_lock(&idev->igroup->lock);
- /*
- * Try to upgrade the domain we have, it is an iommu driver bug to
- * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
- * enforce_cache_coherency when there are no devices attached to the
- * domain.
- */
- if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
- if (hwpt->domain->ops->enforce_cache_coherency)
- hwpt->enforce_cache_coherency =
- hwpt->domain->ops->enforce_cache_coherency(
- hwpt->domain);
- if (!hwpt->enforce_cache_coherency) {
- WARN_ON(list_empty(&hwpt->devices));
- return -EINVAL;
- }
+ if (idev->igroup->hwpt != NULL && idev->igroup->hwpt != hwpt) {
+ rc = -EINVAL;
+ goto err_unlock;
}
- rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
- idev->group, &sw_msi_start);
- if (rc)
- return rc;
+ /* Try to upgrade the domain we have */
+ if (idev->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
- rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
+ rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
+ &idev->igroup->sw_msi_start);
if (rc)
- goto err_unresv;
+ goto err_unlock;
/*
- * FIXME: Hack around missing a device-centric iommu api, only attach to
- * the group once for the first device that is in the group.
+ * Only attach to the group once for the first device that is in the
+ * group. All the other devices will follow this attachment. The user
+ * should attach every device individually to the hwpt as the per-device
+ * reserved regions are only updated during individual device
+ * attachment.
*/
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
- rc = iommu_attach_group(hwpt->domain, idev->group);
+ if (list_empty(&idev->igroup->device_list)) {
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
if (rc)
goto err_unresv;
+
+ rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
+ if (rc)
+ goto err_unresv;
+ idev->igroup->hwpt = hwpt;
}
+ refcount_inc(&hwpt->obj.users);
+ list_add_tail(&idev->group_item, &idev->igroup->device_list);
+ mutex_unlock(&idev->igroup->lock);
return 0;
err_unresv:
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
return rc;
}
-void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
- iommu_detach_group(hwpt->domain, idev->group);
+ struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
+
+ mutex_lock(&idev->igroup->lock);
+ list_del(&idev->group_item);
+ if (list_empty(&idev->igroup->device_list)) {
+ iommu_detach_group(hwpt->domain, idev->igroup->group);
+ idev->igroup->hwpt = NULL;
+ }
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy hwpt */
+ return hwpt;
}
-static int iommufd_device_do_attach(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt)
+static struct iommufd_hw_pagetable *
+iommufd_device_do_attach(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
{
int rc;
- mutex_lock(&hwpt->devices_lock);
rc = iommufd_hw_pagetable_attach(hwpt, idev);
if (rc)
- goto out_unlock;
+ return ERR_PTR(rc);
+ return NULL;
+}
- idev->hwpt = hwpt;
- refcount_inc(&hwpt->obj.users);
- list_add(&idev->devices_item, &hwpt->devices);
-out_unlock:
- mutex_unlock(&hwpt->devices_lock);
- return rc;
+static struct iommufd_hw_pagetable *
+iommufd_device_do_replace(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
+{
+ struct iommufd_group *igroup = idev->igroup;
+ struct iommufd_hw_pagetable *old_hwpt;
+ unsigned int num_devices = 0;
+ struct iommufd_device *cur;
+ int rc;
+
+ mutex_lock(&idev->igroup->lock);
+
+ if (igroup->hwpt == NULL) {
+ rc = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (hwpt == igroup->hwpt) {
+ mutex_unlock(&idev->igroup->lock);
+ return NULL;
+ }
+
+ /* Try to upgrade the domain we have */
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ num_devices++;
+ if (cur->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
+ }
+
+ old_hwpt = igroup->hwpt;
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ rc = iopt_table_enforce_dev_resv_regions(
+ &hwpt->ioas->iopt, cur->dev, NULL);
+ if (rc)
+ goto err_unresv;
+ }
+ }
+
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
+ if (rc)
+ goto err_unresv;
+
+ rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
+ if (rc)
+ goto err_unresv;
+
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&old_hwpt->ioas->iopt,
+ cur->dev);
+ }
+
+ igroup->hwpt = hwpt;
+
+ /*
+ * Move the refcounts held by the device_list to the new hwpt. Retain a
+ * refcount for this thread as the caller will free it.
+ */
+ refcount_add(num_devices, &hwpt->obj.users);
+ if (num_devices > 1)
+ WARN_ON(refcount_sub_and_test(num_devices - 1,
+ &old_hwpt->obj.users));
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy old_hwpt */
+ return old_hwpt;
+err_unresv:
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
+ return ERR_PTR(rc);
}
+typedef struct iommufd_hw_pagetable *(*attach_fn)(
+ struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt);
+
/*
* When automatically managing the domains we search for a compatible domain in
* the iopt and if one is found use it, otherwise create a new domain.
* Automatic domain selection will never pick a manually created domain.
*/
-static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
- struct iommufd_ioas *ioas)
+static struct iommufd_hw_pagetable *
+iommufd_device_auto_get_domain(struct iommufd_device *idev,
+ struct iommufd_ioas *ioas, u32 *pt_id,
+ attach_fn do_attach)
{
+ /*
+ * iommufd_hw_pagetable_attach() is called by
+ * iommufd_hw_pagetable_alloc() in immediate attachment mode, same as
+ * iommufd_device_do_attach(). So if we are in this mode then we prefer
+ * to use the immediate_attach path as it supports drivers that can't
+ * directly allocate a domain.
+ */
+ bool immediate_attach = do_attach == iommufd_device_do_attach;
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_hw_pagetable *hwpt;
- int rc;
/*
* There is no differentiation when domains are allocated, so any domain
@@ -320,50 +521,58 @@ static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
if (!iommufd_lock_obj(&hwpt->obj))
continue;
- rc = iommufd_device_do_attach(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt)) {
+ iommufd_put_object(&hwpt->obj);
+ /*
+ * -EINVAL means the domain is incompatible with the
+ * device. Other error codes should propagate to
+ * userspace as failure. Success means the domain is
+ * attached.
+ */
+ if (PTR_ERR(destroy_hwpt) == -EINVAL)
+ continue;
+ goto out_unlock;
+ }
+ *pt_id = hwpt->obj.id;
iommufd_put_object(&hwpt->obj);
-
- /*
- * -EINVAL means the domain is incompatible with the device.
- * Other error codes should propagate to userspace as failure.
- * Success means the domain is attached.
- */
- if (rc == -EINVAL)
- continue;
goto out_unlock;
}
- hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
+ hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
+ immediate_attach);
if (IS_ERR(hwpt)) {
- rc = PTR_ERR(hwpt);
+ destroy_hwpt = ERR_CAST(hwpt);
goto out_unlock;
}
+
+ if (!immediate_attach) {
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
+ goto out_abort;
+ } else {
+ destroy_hwpt = NULL;
+ }
+
hwpt->auto_domain = true;
+ *pt_id = hwpt->obj.id;
- mutex_unlock(&ioas->mutex);
iommufd_object_finalize(idev->ictx, &hwpt->obj);
- return 0;
+ mutex_unlock(&ioas->mutex);
+ return destroy_hwpt;
+
+out_abort:
+ iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj);
out_unlock:
mutex_unlock(&ioas->mutex);
- return rc;
+ return destroy_hwpt;
}
-/**
- * iommufd_device_attach - Connect a device from an iommu_domain
- * @idev: device to attach
- * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
- * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
- *
- * This connects the device to an iommu_domain, either automatically or manually
- * selected. Once this completes the device could do DMA.
- *
- * The caller should return the resulting pt_id back to userspace.
- * This function is undone by calling iommufd_device_detach().
- */
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
+ attach_fn do_attach)
{
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_object *pt_obj;
- int rc;
pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
if (IS_ERR(pt_obj))
@@ -374,8 +583,8 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_hw_pagetable *hwpt =
container_of(pt_obj, struct iommufd_hw_pagetable, obj);
- rc = iommufd_device_do_attach(idev, hwpt);
- if (rc)
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
@@ -383,27 +592,80 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_ioas *ioas =
container_of(pt_obj, struct iommufd_ioas, obj);
- rc = iommufd_device_auto_get_domain(idev, ioas);
- if (rc)
+ destroy_hwpt = iommufd_device_auto_get_domain(idev, ioas, pt_id,
+ do_attach);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
default:
- rc = -EINVAL;
+ destroy_hwpt = ERR_PTR(-EINVAL);
goto out_put_pt_obj;
}
+ iommufd_put_object(pt_obj);
- refcount_inc(&idev->obj.users);
- *pt_id = idev->hwpt->obj.id;
- rc = 0;
+ /* This destruction has to be after we unlock everything */
+ if (destroy_hwpt)
+ iommufd_hw_pagetable_put(idev->ictx, destroy_hwpt);
+ return 0;
out_put_pt_obj:
iommufd_put_object(pt_obj);
- return rc;
+ return PTR_ERR(destroy_hwpt);
+}
+
+/**
+ * iommufd_device_attach - Connect a device to an iommu_domain
+ * @idev: device to attach
+ * @pt_id: Input an IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This connects the device to an iommu_domain, either automatically or manually
+ * selected. Once this completes the device could do DMA.
+ *
+ * The caller should return the resulting pt_id back to userspace.
+ * This function is undone by calling iommufd_device_detach().
+ */
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+{
+ int rc;
+
+ rc = iommufd_device_change_pt(idev, pt_id, &iommufd_device_do_attach);
+ if (rc)
+ return rc;
+
+ /*
+ * Pairs with iommufd_device_detach() - catches caller bugs attempting
+ * to destroy a device with an attachment.
+ */
+ refcount_inc(&idev->obj.users);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
/**
+ * iommufd_device_replace - Change the device's iommu_domain
+ * @idev: device to change
+ * @pt_id: Input an IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This is the same as::
+ *
+ * iommufd_device_detach();
+ * iommufd_device_attach();
+ *
+ * If it fails then no change is made to the attachment. The iommu driver may
+ * implement this so there is no disruption in translation. This can only be
+ * called if iommufd_device_attach() has already succeeded.
+ */
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id)
+{
+ return iommufd_device_change_pt(idev, pt_id,
+ &iommufd_device_do_replace);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_replace, IOMMUFD);
+
+/**
* iommufd_device_detach - Disconnect a device to an iommu_domain
* @idev: device to detach
*
@@ -412,33 +674,87 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
*/
void iommufd_device_detach(struct iommufd_device *idev)
{
- struct iommufd_hw_pagetable *hwpt = idev->hwpt;
-
- mutex_lock(&hwpt->devices_lock);
- list_del(&idev->devices_item);
- idev->hwpt = NULL;
- iommufd_hw_pagetable_detach(hwpt, idev);
- mutex_unlock(&hwpt->devices_lock);
-
- if (hwpt->auto_domain)
- iommufd_object_deref_user(idev->ictx, &hwpt->obj);
- else
- refcount_dec(&hwpt->obj.users);
+ struct iommufd_hw_pagetable *hwpt;
+ hwpt = iommufd_hw_pagetable_detach(idev);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
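
Taken together with the existing bind entry point, these exports let a consumer such as VFIO move a bound device onto a different page table without passing through a detach/attach window. A rough usage sketch with error handling trimmed; the helper name is illustrative and the pt IDs are assumed to come from earlier IOAS or hw_pagetable allocation:

    #include <linux/iommufd.h>

    static int example_hitless_switch(struct iommufd_device *idev,
                                      u32 first_pt, u32 second_pt)
    {
            u32 pt_id = first_pt;
            int rc;

            /* Initial attach; pt_id is updated to the resulting hwpt ID */
            rc = iommufd_device_attach(idev, &pt_id);
            if (rc)
                    return rc;

            /* Switch page tables without a blocking domain in between */
            pt_id = second_pt;
            rc = iommufd_device_replace(idev, &pt_id);

            /* On replace failure the original attachment is left in place */
            iommufd_device_detach(idev);
            return rc;
    }
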
+/*
+ * On success, this takes a reference (refcount_inc()) on the valid new_ioas and
+ * drops one (refcount_dec()) on the valid cur_ioas (access->ioas). A caller that
+ * obtained new_ioas via iommufd_get_object() must still call iommufd_put_object().
+ */
+static int iommufd_access_change_ioas(struct iommufd_access *access,
+ struct iommufd_ioas *new_ioas)
+{
+ u32 iopt_access_list_id = access->iopt_access_list_id;
+ struct iommufd_ioas *cur_ioas = access->ioas;
+ int rc;
+
+ lockdep_assert_held(&access->ioas_lock);
+
+ /* We are racing with a concurrent detach, bail */
+ if (cur_ioas != access->ioas_unpin)
+ return -EBUSY;
+
+ if (cur_ioas == new_ioas)
+ return 0;
+
+ /*
+ * Set ioas to NULL to block any further iommufd_access_pin_pages().
+ * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
+ */
+ access->ioas = NULL;
+
+ if (new_ioas) {
+ rc = iopt_add_access(&new_ioas->iopt, access);
+ if (rc) {
+ access->ioas = cur_ioas;
+ return rc;
+ }
+ refcount_inc(&new_ioas->obj.users);
+ }
+
+ if (cur_ioas) {
+ if (access->ops->unmap) {
+ mutex_unlock(&access->ioas_lock);
+ access->ops->unmap(access->data, 0, ULONG_MAX);
+ mutex_lock(&access->ioas_lock);
+ }
+ iopt_remove_access(&cur_ioas->iopt, access, iopt_access_list_id);
+ refcount_dec(&cur_ioas->obj.users);
+ }
+
+ access->ioas = new_ioas;
+ access->ioas_unpin = new_ioas;
+
+ return 0;
+}
+
+static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
+{
+ struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
+ int rc;
+