43 files changed, 1651 insertions(+), 1213 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index b6bfbec3a849..020d6f226efb 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -15,6 +15,7 @@ Required Properties:
     - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
     - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
     - "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU.
+    - "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU.
     - "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU.
     - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
     - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 7904f591a79b..eb0db20c9d4c 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_IA64_IOMMU_H
 #define _ASM_IA64_IOMMU_H 1

+#include <linux/acpi.h>
+
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)

@@ -9,6 +11,9 @@ extern void no_iommu_init(void);
 #ifdef CONFIG_INTEL_IOMMU
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; }
 #else
 #define no_iommu	(1)
 #define iommu_detected	(0)
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index b91623d521d9..bf1ed2ddc74b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -2,10 +2,28 @@
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H

+#include <linux/acpi.h>
+
+#include <asm/e820/api.h>
+
 extern int force_iommu, no_iommu;
 extern int iommu_detected;

 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)

+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+	u64 start = rmrr->base_address;
+	u64 end = rmrr->end_address + 1;
+
+	if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+		return 0;
+
+	pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
+	       start, end - 1);
+	return -EINVAL;
+}
+
 #endif /* _ASM_X86_IOMMU_H */
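
Note on the new x86 arch_rmrr_sanity_check() above: an RMRR only passes if the whole range [base_address, end_address] (end inclusive, hence the +1) lies inside E820-reserved memory. Below is a minimal self-contained sketch of that containment rule; the mock range table and the single-range check are illustrative simplifications, not the kernel's e820__mapped_all() implementation, which can also cover a range with several adjacent reserved entries.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* reserved, as [start, end) */

/* Simplified stand-in for the e820__mapped_all() idea: true iff
 * [start, end) is entirely contained in one reserved range. */
static bool mapped_all_reserved(const struct range *tbl, int n,
				uint64_t start, uint64_t end)
{
	for (int i = 0; i < n; i++)
		if (start >= tbl[i].start && end <= tbl[i].end)
			return true;
	return false;
}

int main(void)
{
	const struct range reserved[] = { { 0x9d000000, 0x9f800000 } };
	uint64_t rmrr_base = 0x9d800000, rmrr_end = 0x9dffffff;

	if (!mapped_all_reserved(reserved, 1, rmrr_base, rmrr_end + 1))
		printf("FW_BUG: RMRR not covered by a reserved region\n");
	else
		printf("RMRR [%#llx-%#llx] is covered\n",
		       (unsigned long long)rmrr_base,
		       (unsigned long long)rmrr_end);
	return 0;
}
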
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..bee8729525ec 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
 	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
 				     addr, val);
 }
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+	return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..e1cd933ea9ae 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
 #define FIRST_EXT_ARG_IDX 3
 #define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)

-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev:	device
- * @svc_id:	service identifier
- * @cmd_id:	command identifier
- * @desc:	Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
- */
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
-			 const struct qcom_scm_desc *desc,
-			 struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+			       struct arm_smccc_res *res, u32 fn_id,
+			       u64 x5, u32 type)
+{
+	u64 cmd;
+	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+	cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+				 ARM_SMCCC_OWNER_SIP, fn_id);
+
+	quirk.state.a6 = 0;
+
+	do {
+		arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+				    desc->args[1], desc->args[2], x5,
+				    quirk.state.a6, 0, res, &quirk);
+
+		if (res->a0 == QCOM_SCM_INTERRUPTED)
+			cmd = res->a0;
+
+	} while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+			     struct arm_smccc_res *res, u32 fn_id,
+			     u64 x5, bool atomic)
+{
+	int retry_count = 0;
+
+	if (atomic) {
+		__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+		return;
+	}
+
+	do {
+		mutex_lock(&qcom_scm_lock);
+
+		__qcom_scm_call_do(desc, res, fn_id, x5,
+				   ARM_SMCCC_STD_CALL);
+
+		mutex_unlock(&qcom_scm_lock);
+
+		if (res->a0 == QCOM_SCM_V2_EBUSY) {
+			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+				break;
+			msleep(QCOM_SCM_EBUSY_WAIT_MS);
+		}
+	} while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			    const struct qcom_scm_desc *desc,
+			    struct arm_smccc_res *res, bool atomic)
 {
 	int arglen = desc->arginfo & 0xf;
-	int retry_count = 0, i;
+	int i;
 	u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
-	u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+	u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
 	dma_addr_t args_phys = 0;
 	void *args_virt = NULL;
 	size_t alloc_len;
-	struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;

 	if (unlikely(arglen > N_REGISTER_ARGS)) {
 		alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
-		args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+		args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);

 		if (!args_virt)
 			return -ENOMEM;
@@ -117,46 +157,56 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
 		x5 = args_phys;
 	}

-	do {
-		mutex_lock(&qcom_scm_lock);
-
-		cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
-					 qcom_smccc_convention,
-					 ARM_SMCCC_OWNER_SIP, fn_id);
-
-		quirk.state.a6 = 0;
-
-		do {
-			arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
-					    desc->args[1], desc->args[2], x5,
-					    quirk.state.a6, 0, res, &quirk);
-
-			if (res->a0 == QCOM_SCM_INTERRUPTED)
-				cmd = res->a0;
-
-		} while (res->a0 == QCOM_SCM_INTERRUPTED);
-
-		mutex_unlock(&qcom_scm_lock);
-
-		if (res->a0 == QCOM_SCM_V2_EBUSY) {
-			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
-				break;
-			msleep(QCOM_SCM_EBUSY_WAIT_MS);
-		}
-	} while (res->a0 == QCOM_SCM_V2_EBUSY);
+	qcom_scm_call_do(desc, res, fn_id, x5, atomic);

 	if (args_virt) {
 		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
 		kfree(args_virt);
 	}

-	if (res->a0 < 0)
+	if ((long)res->a0 < 0)
 		return qcom_scm_remap_error(res->a0);

 	return 0;
 }

 /**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			 const struct qcom_scm_desc *desc,
+			 struct arm_smccc_res *res)
+{
+	might_sleep();
+	return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+				const struct qcom_scm_desc *desc,
+				struct arm_smccc_res *res)
+{
+	return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  * @entry: Entry point function for the cpus
  * @cpus: The cpumask of cpus that will use the entry point
@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
 	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
 			     &desc, &res);
 }
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+	desc.args[1] = en;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+				    QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
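
The qcom_scm-64.c rework above separates the two retry concerns: __qcom_scm_call_do() re-issues the SMC only while firmware reports QCOM_SCM_INTERRUPTED, and qcom_scm_call_do() adds mutex serialization plus bounded sleep-and-retry on QCOM_SCM_V2_EBUSY, which atomic callers skip entirely (they may not sleep, so they issue a single fast call). A self-contained sketch of that two-level pattern follows; the simulated firmware and the constants are illustrative only.

#include <stdio.h>

enum { SCM_OK = 0, SCM_INTERRUPTED = 1, SCM_V2_EBUSY = 2 };
enum { EBUSY_MAX_RETRY = 3 };

static int busy_left = 2;		/* simulated firmware state */

static int fake_smc(void)
{
	if (busy_left > 0) {
		busy_left--;
		return SCM_V2_EBUSY;	/* firmware temporarily busy */
	}
	return SCM_OK;
}

/* Inner loop: retry only the "interrupted" case, nothing else. */
static int scm_call_inner(void)
{
	int ret;

	do {
		ret = fake_smc();
	} while (ret == SCM_INTERRUPTED);

	return ret;
}

/* Outer loop: bounded busy-retry with a sleep between attempts;
 * an atomic caller would skip this loop and call scm_call_inner()
 * once as a fast call. */
static int scm_call(void)
{
	int retry_count = 0, ret;

	do {
		ret = scm_call_inner();
		if (ret == SCM_V2_EBUSY) {
			if (retry_count++ > EBUSY_MAX_RETRY)
				break;
			/* msleep(QCOM_SCM_EBUSY_WAIT_MS) in the kernel */
		}
	} while (ret == SCM_V2_EBUSY);

	return ret;
}

int main(void)
{
	printf("scm_call() -> %d\n", scm_call());	/* 0 after two retries */
	return 0;
}
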
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 4802ab170fe5..a729e05c21b8 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 }
 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+	return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 {
 	return __qcom_scm_io_readl(__scm->dev, addr, val);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..baee744dbcfe 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
 				      u32 spare);
 #define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE	3
 #define QCOM_SCM_IOMMU_SECURE_PTBL_INIT	4
+#define QCOM_SCM_SVC_SMMU_PROGRAM	0x15
+#define QCOM_SCM_CONFIG_ERRATA1		0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL	0x2
 extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
 					     size_t *size);
 extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
 					     u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+						bool enable);
 #define QCOM_MEM_PROT_ASSIGN_ID	0x16
 extern int __qcom_scm_assign_mem(struct device *dev,
 				 phys_addr_t mem_region, size_t mem_sz,
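
The new export gives SMMU code a one-call switch for the SMMU-500 wait-safe erratum logic (SCM service 0x15, command 0x3, applied to all client contexts). The consumer is the arm-smmu-qcom.c object added to the Makefile below, which is not visible in this excerpt, so the following is only an illustrative sketch of how a caller would use the export while tolerating firmware without support; on 32-bit the stub above always yields -ENODEV.

/* Illustrative only: toggle the wait-safe logic off, degrading
 * gracefully where the SCM call is unimplemented (-ENODEV). */
static void example_disable_wait_safe(struct device *dev)
{
	int ret = qcom_scm_qsmmu500_wait_safe_toggle(0);

	if (ret)
		dev_warn(dev, "wait-safe toggle unsupported: %d\n", ret);
}
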
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e3842eabcfdd..f2b7a7132ce4 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -3,6 +3,10 @@
 config IOMMU_IOVA
 	tristate

+# The IOASID library may also be used by non-IOMMU_API users
+config IOASID
+	tristate
+
 # IOMMU_API always gets selected by whoever wants it.
 config IOMMU_API
 	bool
@@ -138,6 +142,7 @@ config AMD_IOMMU
 	select PCI_PASID
 	select IOMMU_API
 	select IOMMU_IOVA
+	select IOMMU_DMA
 	depends on X86_64 && PCI && ACPI
 	---help---
 	  With this option you can enable support for AMD IOMMU hardware in
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4f405f926e73..97814cc861ea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOASID) += ioasid.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
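
The select IOMMU_DMA above backs the AMD driver's move off its private dma_ops allocator (removed in the amd_iommu.c diff below) onto the shared dma-iommu layer. Only the free side (iommu_put_dma_cookie) is visible in this excerpt, so the allocation side shown here is a hedged sketch of the usual cookie lifecycle, not the patch's actual code:

/* Sketch, assuming the usual dma-iommu pairing: the cookie attached at
 * allocation time holds the IOVA allocator state that the removed
 * driver-private iova_domain used to hold. */
static struct protection_domain *example_alloc(void)
{
	struct protection_domain *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	if (!domain)
		return NULL;

	if (iommu_get_dma_cookie(&domain->domain)) {
		kfree(domain);
		return NULL;
	}

	return domain;	/* released later via iommu_put_dma_cookie() */
}
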
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dd555078258c..a2668f94a8d6 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -20,6 +20,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
 #include <linux/iommu-helper.h>
 #include <linux/iommu.h>
 #include <linux/delay.h>
@@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;

-static const struct dma_map_ops amd_iommu_dma_ops;
-
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache;
 static void update_domain(struct protection_domain *domain);
 static int protection_domain_init(struct protection_domain *domain);
 static void detach_device(struct device *dev);
-static void iova_domain_flush_tlb(struct iova_domain *iovad);
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
-	/* generic protection domain information */
-	struct protection_domain domain;
-
-	/* IOVA RB-Tree */
-	struct iova_domain iovad;
-};
-
-static struct iova_domain reserved_iova_ranges;
-static struct lock_class_key reserved_rbtree_key;

 /****************************************************************************
  *
@@ -187,12 +171,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }

-static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
-{
-	BUG_ON(domain->flags != PD_DMA_OPS_MASK);
-	return container_of(domain, struct dma_ops_domain, domain);
-}
-
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -226,71 +204,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
 	return NULL;
 }

-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
-	*(u16 *)data = alias;
-	return 0;
-}
-
-static u16 get_alias(struct device *dev)
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 devid, ivrs_alias, pci_alias;
-
-	/* The callers make sure that get_device_id() does not fail here */
-	devid = get_device_id(dev);
+	u16 devid = pci_dev_id(pdev);

-	/* For ACPI HID devices, we simply return the devid as such */
-	if (!dev_is_pci(dev))
-		return devid;
+	if (devid == alias)
+		return 0;

-	ivrs_alias = amd_iommu_alias_table[devid];
+	amd_iommu_rlookup_table[alias] =
+		amd_iommu_rlookup_table[devid];
+	memcpy(amd_iommu_dev_table[alias].data,
+	       amd_iommu_dev_table[devid].data,
+	       sizeof(amd_iommu_dev_table[alias].data));

-	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+	return 0;
+}

-	if (ivrs_alias == pci_alias)
-		return ivrs_alias;
+static void clone_aliases(struct pci_dev *pdev)
+{
+	if (!pdev)
+		return;

 	/*
-	 * DMA alias showdown
-	 *
-	 * The IVRS is fairly reliable in telling us about aliases, but it
-	 * can't know about every screwy device. If we don't have an IVRS
-	 * reported alias, use the PCI reported alias. In that case we may
-	 * still need to initialize the rlookup and dev_table entries if the
-	 * alias is to a non-existent device.
+	 * The IVRS alias stored in the alias table may not be
+	 * part of the PCI DMA aliases if its bus differs
+	 * from the original device.
 	 */
-	if (ivrs_alias == devid) {
-		if (!amd_iommu_rlookup_table[pci_alias]) {
-			amd_iommu_rlookup_table[pci_alias] =
-				amd_iommu_rlookup_table[devid];
-			memcpy(amd_iommu_dev_table[pci_alias].data,
-			       amd_iommu_dev_table[devid].data,
-			       sizeof(amd_iommu_dev_table[pci_alias].data));
-		}
+	clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);

-		return pci_alias;
-	}
+	pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}

-	pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
-		 "for device [%04x:%04x], kernel reported alias "
-		 "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
-		 PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
-		 PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
-		 PCI_FUNC(pci_alias));
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 ivrs_alias;
+
+	/* For ACPI HID devices, there are no aliases */
+	if (!dev_is_pci(dev))
+		return NULL;

 	/*
-	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
-	 * bus, then the IVRS table may know about a quirk that we don't.
+	 * Add the IVRS alias to the pci aliases if it is on the same
+	 * bus. The IVRS table may know about a quirk that we don't.
 	 */
-	if (pci_alias == devid &&
+	ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+	if (ivrs_alias != pci_dev_id(pdev) &&
 	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
 		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
 		pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
 			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
 	}

-	return ivrs_alias;
+	clone_aliases(pdev);
+
+	return pdev;
 }

 static struct iommu_dev_data *find_dev_data(u16 devid)
@@ -428,7 +396,7 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;

-	dev_data->alias = get_alias(dev);
+	dev_data->pdev = setup_aliases(dev);

 	/*
	 * By default we use passthrough mode for IOMMUv2 capable device.
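
The old code tracked one alias ID per device; the rework above instead copies the real device's device-table entry to every DMA alias through the pci_for_each_dma_alias() callback, so multi-alias devices are covered too. A self-contained sketch of that clone-to-all-aliases idea follows; the toy table, sizes, and IDs are illustrative, not the driver's real structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dev_table_entry { uint64_t data[4]; };

static struct dev_table_entry dev_table[16];

/* Same shape as clone_alias() above: copy the real device's entry
 * over the alias's entry, skipping the identity case. */
static int clone_entry(uint16_t devid, uint16_t alias)
{
	if (devid == alias)
		return 0;
	memcpy(dev_table[alias].data, dev_table[devid].data,
	       sizeof(dev_table[alias].data));
	return 0;
}

int main(void)
{
	const uint16_t devid = 3, aliases[] = { 3, 5, 9 };

	dev_table[devid].data[0] = 0xdeadbeef;	/* pretend DTE contents */

	/* pci_for_each_dma_alias() drives this loop in the kernel */
	for (unsigned int i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++)
		clone_entry(devid, aliases[i]);

	printf("alias 5 DTE word0 = %#llx\n",
	       (unsigned long long)dev_table[5].data[0]);
	return 0;
}
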
@@ -453,20 +421,16 @@ static int iommu_init_device(struct device *dev)

 static void iommu_ignore_device(struct device *dev)
 {
-	u16 alias;
 	int devid;

 	devid = get_device_id(dev);
 	if (devid < 0)
 		return;

-	alias = get_alias(dev);
-
+	amd_iommu_rlookup_table[devid] = NULL;
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
-	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

-	amd_iommu_rlookup_table[devid] = NULL;
-	amd_iommu_rlookup_table[alias] = NULL;
+	setup_aliases(dev);
 }

 static void iommu_uninit_device(struct device *dev)
@@ -640,8 +604,7 @@ retry:
 			pasid, address, flags);
 		break;
 	case EVENT_TYPE_INV_PPR_REQ:
-		pasid = ((event[0] >> 16) & 0xFFFF)
-			| ((event[1] << 6) & 0xF0000);
+		pasid = PPR_PASID(*((u64 *)__evt));
 		tag = event[1] & 0x03FF;
 		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -876,17 +839,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 			       struct iommu_cmd *cmd)
 {
 	u8 *target;
-
-	target = iommu->cmd_buf + iommu->cmd_buf_tail;
-
-	iommu->cmd_buf_tail += sizeof(*cmd);
-	iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;
+	u32 tail;

 	/* Copy command to buffer */
+	tail = iommu->cmd_buf_tail;
+	target = iommu->cmd_buf + tail;
 	memcpy(target, cmd, sizeof(*cmd));

+	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+	iommu->cmd_buf_tail = tail;
+
 	/* Tell the IOMMU about it */
-	writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 }

 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
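
The copy_cmd_to_buffer() change above reads the tail into a local once, places the command, and only then advances and publishes the new tail (including the MMIO doorbell write), instead of bumping the shared field before the memcpy. A self-contained sketch of the wrap-around tail arithmetic; the toy sizes stand in for the real CMD_BUFFER_SIZE and command size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_SIZE	16U			/* sizeof(struct iommu_cmd) */
#define CMD_BUFFER_SIZE	(8 * CMD_SIZE)		/* multiple of CMD_SIZE */

static uint8_t cmd_buf[CMD_BUFFER_SIZE];
static uint32_t cmd_buf_tail;

static void copy_cmd(const uint8_t *cmd)
{
	/* Snapshot the tail and place the command there ... */
	uint32_t tail = cmd_buf_tail;

	memcpy(cmd_buf + tail, cmd, CMD_SIZE);

	/* ... then advance with wrap-around and publish once. In the
	 * driver, this final value is also written to the MMIO tail
	 * register to kick the IOMMU. */
	tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;
	cmd_buf_tail = tail;
}

int main(void)
{
	uint8_t cmd[CMD_SIZE] = { 0 };

	for (int i = 0; i < 9; i++)		/* the 9th write wraps */
		copy_cmd(cmd);

	printf("tail after 9 commands: %u\n", cmd_buf_tail);	/* 16 */
	return 0;
}
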
@@ -1236,6 +1200,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
 	return iommu_queue_command(iommu, &cmd);
 }

+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct amd_iommu *iommu = data;
+
+	return iommu_flush_dte(iommu, alias);
+}
+
 /*
  * Command send function for invalidating a device table entry
  */
@@ -1246,14 +1217,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 	int ret;

 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = dev_data->alias;

-	ret = iommu_flush_dte(iommu, dev_data->devid);
-	if (!ret && alias != dev_data->devid)
-		ret = iommu_flush_dte(iommu, alias);
+	if (dev_data->pdev)
+		ret = pci_for_each_dma_alias(dev_data->pdev,
+					     device_flush_dte_alias, iommu);
+	else
+		ret = iommu_flush_dte(iommu, dev_data->devid);

 	if (ret)
 		return ret;

+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		ret = iommu_flush_dte(iommu, alias);
+		if (ret)
+			return ret;
+	}
+
 	if (dev_data->ats.enabled)
 		ret = device_flush_iotlb(dev_data, 0, ~0UL);

@@ -1302,12 +1281,6 @@ static void domain_flush_pages(struct protection_domain *domain,
 	__domain_flush_pages(domain, address, size, 0);
 }

-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
-	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
 static void domain_flush_tlb_pde(struct protection_domain *domain)
 {
@@ -1755,43 +1728,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,

 /****************************************************************************
  *
- * The next functions belong to the address allocator for the dma_ops
- * interface functions.
- *
- ****************************************************************************/
-
-
-static unsigned long dma_ops_alloc_iova(struct device *dev,
-					struct dma_ops_domain *dma_dom,
-					unsigned int pages, u64 dma_mask)
-{
-	unsigned long pfn = 0;
-
-	pages = __roundup_pow_of_two(pages);
-
-	if (dma_mask > DMA_BIT_MASK(32))
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(DMA_BIT_MASK(32)), false);
-
-	if (!pfn)
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(dma_mask), true);
-
-	return (pfn << PAGE_SHIFT);
-}
-
-static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
-			      unsigned long address,
-			      unsigned int pages)
-{
-	pages = __roundup_pow_of_two(pages);
-	address >>= PAGE_SHIFT;
-
-	free_iova_fast(&dma_dom->iovad, address, pages);
-}
-
-/****************************************************************************
- *
  * The next functions belong to the domain allocation. A domain is
  * allocated for every IOMMU as the default domain. If device isolation
  * is enabled, every device gets its own domain. The most important thing
@@ -1866,42 +1802,23 @@ static void free_gcr3_table(struct protection_domain *domain)
 	free_page((unsigned long)domain->gcr3_tbl);
 }

-static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dom->domain.lock, flags);
-	domain_flush_tlb(&dom->domain);
-	domain_flush_complete(&dom->domain);
-	spin_unlock_irqrestore(&dom->domain.lock, flags);
-}
-
-static void iova_domain_flush_tlb(struct iova_domain *iovad)
-{
-	struct dma_ops_domain *dom;
-
-	dom = container_of(iovad, struct dma_ops_domain, iovad);
-
-	dma_ops_domain_flush_tlb(dom);
-}
-
 /*
  * Free a domain, only used if something went wrong in the
  * allocation path and we need to free an already allocated page table
  */
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
+static void dma_ops_domain_free(struct protection_domain *domain)
 {
-	if (!dom)
+	if (!domain)
 		return;

-	put_iova_domain(&dom->iovad);
+	iommu_put_dma_cookie(&domain->domain);

-	free_pagetable(&dom->domain);
+	free_pagetable(domain);

-	if (dom->domain.id)
-		domain_id_free(dom->domain.id);
+	if (domain->id)
+		domain_id_free(domain->id);

-	kfree(dom);
+	kfree(domain);
 }

 /*
@@ -1909,35 +1826,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
  * It also initializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
+static struct protection_domain *dma_ops_domain_alloc(void)
 {
-	struct dma_ops_domain *dma_dom;
+	struct protection_domain *domain;

-	dma_dom = kzalloc(sizeof(struct dma_ops_
