From d2f85a263883b679f87ed8f911746105658e9c47 Mon Sep 17 00:00:00 2001
From: Yi Liu
Date: Thu, 28 Mar 2024 05:29:58 -0700
Subject: iommu: Pass domain to remove_dev_pasid() op

Existing remove_dev_pasid() callbacks of the underlying iommu drivers
get the attached domain from the group->pasid_array. However, the
domain stored in group->pasid_array is not correct in all scenarios.
A wrong domain may result in a failure in the remove_dev_pasid()
callback. To avoid such problems, it is more reliable to pass the
domain to the remove_dev_pasid() op.

Suggested-by: Jason Gunthorpe
Signed-off-by: Yi Liu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240328122958.83332-3-yi.l.liu@intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 50eb9aed47cc..45c75a8a0ef5 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4587,19 +4587,15 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 	return 0;
 }
 
-static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+					 struct iommu_domain *domain)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct dev_pasid_info *curr, *dev_pasid = NULL;
 	struct intel_iommu *iommu = info->iommu;
-	struct dmar_domain *dmar_domain;
-	struct iommu_domain *domain;
 	unsigned long flags;
 
-	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
-	if (WARN_ON_ONCE(!domain))
-		goto out_tear_down;
-
 	/*
 	 * The SVA implementation needs to handle its own stuffs like the mm
 	 * notification. Before consolidating that code into iommu core, let
@@ -4610,7 +4606,6 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 		goto out_tear_down;
 	}
 
-	dmar_domain = to_dmar_domain(domain);
 	spin_lock_irqsave(&dmar_domain->lock, flags);
 	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
 		if (curr->dev == dev && curr->pasid == pasid) {
-- cgit v1.2.3

From 06c375053cefc3a2f383d200596abe5ab3fb35f9 Mon Sep 17 00:00:00 2001
From: Pasha Tatashin
Date: Sat, 13 Apr 2024 00:25:12 +0000
Subject: iommu/vt-d: add wrapper functions for page allocations

In order to improve observability and accountability of the IOMMU
layer, we must account the number of pages that are allocated by
functions that call directly into the buddy allocator.

This is achieved by first wrapping the allocation-related functions
into separate inline functions in a new file:

  drivers/iommu/iommu-pages.h

Convert all page allocation calls under iommu/intel to use these new
functions.
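For illustration only, a minimal sketch of what such wrappers could
look like, mirroring the removed alloc_pgtable_page()/free_pgtable_page()
helpers below (the accounting added by the real iommu-pages.h is
omitted here):

    /* Sketch only: the upstream header additionally wires in accounting. */
    static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
    {
    	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);

    	return page ? page_address(page) : NULL;
    }

    static inline void iommu_free_page(void *virt)
    {
    	if (virt)
    		free_page((unsigned long)virt);
    }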
Signed-off-by: Pasha Tatashin
Acked-by: David Rientjes
Tested-by: Bagas Sanjaya
Link: https://lore.kernel.org/r/20240413002522.1101315-2-pasha.tatashin@soleen.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 47 +++++++++++++++------------------------------
 1 file changed, 16 insertions(+), 31 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a7ecd90303dc..daaa7a22595e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,6 +27,7 @@
 #include "iommu.h"
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 #include "pasid.h"
 #include "cap_audit.h"
 #include "perfmon.h"
@@ -298,22 +299,6 @@ static int __init intel_iommu_setup(char *str)
 }
 __setup("intel_iommu=", intel_iommu_setup);
 
-void *alloc_pgtable_page(int node, gfp_t gfp)
-{
-	struct page *page;
-	void *vaddr = NULL;
-
-	page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
-	if (page)
-		vaddr = page_address(page);
-	return vaddr;
-}
-
-void free_pgtable_page(void *vaddr)
-{
-	free_page((unsigned long)vaddr);
-}
-
 static int domain_type_is_si(struct dmar_domain *domain)
 {
 	return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -545,7 +530,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 		if (!alloc)
 			return NULL;
 
-		context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+		context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
 		if (!context)
 			return NULL;
 
@@ -719,17 +704,17 @@ static void free_context_table(struct intel_iommu *iommu)
 	for (i = 0; i < ROOT_ENTRY_NR; i++) {
 		context = iommu_context_addr(iommu, i, 0, 0);
 		if (context)
-			free_pgtable_page(context);
+			iommu_free_page(context);
 
 		if (!sm_supported(iommu))
 			continue;
 
 		context = iommu_context_addr(iommu, i, 0x80, 0);
 		if (context)
-			free_pgtable_page(context);
+			iommu_free_page(context);
 	}
 
-	free_pgtable_page(iommu->root_entry);
+	iommu_free_page(iommu->root_entry);
 	iommu->root_entry = NULL;
 }
 
@@ -867,7 +852,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
 
-			tmp_page = alloc_pgtable_page(domain->nid, gfp);
+			tmp_page = iommu_alloc_page_node(domain->nid, gfp);
 			if (!tmp_page)
 				return NULL;
 
@@ -879,7 +864,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
-				free_pgtable_page(tmp_page);
+				iommu_free_page(tmp_page);
 			else
 				domain_flush_cache(domain, pte, sizeof(*pte));
 		}
@@ -992,7 +977,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
 		      last_pfn < level_pfn + level_size(level) - 1)) {
 			dma_clear_pte(pte);
 			domain_flush_cache(domain, pte, sizeof(*pte));
-			free_pgtable_page(level_pte);
+			iommu_free_page(level_pte);
 		}
 next:
 		pfn += level_size(level);
@@ -1016,7 +1001,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-		free_pgtable_page(domain->pgd);
+		iommu_free_page(domain->pgd);
 		domain->pgd = NULL;
 	}
 }
@@ -1118,7 +1103,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
 
-	root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+	root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 		       iommu->name);
@@ -1841,7 +1826,7 @@ static void domain_exit(struct dmar_domain *domain)
 		LIST_HEAD(freelist);
 
 		domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
-		put_pages_list(&freelist);
+		iommu_put_pages_list(&freelist);
 	}
 
 	if (WARN_ON(!list_empty(&domain->devices)))
@@ -2497,7 +2482,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 			if (!old_ce)
 				goto out;
 
-			new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
+			new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
 			if (!new_ce)
 				goto out_unmap;
 
@@ -3426,7 +3411,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 					start_vpfn, mhp->nr_pages,
 					list_empty(&freelist), 0);
 			rcu_read_unlock();
-			put_pages_list(&freelist);
+			iommu_put_pages_list(&freelist);
 		}
 		break;
 	}
@@ -3833,7 +3818,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
-	domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -3987,7 +3972,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
 		pte = dmar_domain->pgd;
 		if (dma_pte_present(pte)) {
 			dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
-			free_pgtable_page(pte);
+			iommu_free_page(pte);
 		}
 		dmar_domain->agaw--;
 	}
@@ -4141,7 +4126,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
 	if (dmar_domain->nested_parent)
 		parent_domain_flush(dmar_domain, start_pfn, nrpages,
 				    list_empty(&gather->freelist));
-	put_pages_list(&gather->freelist);
+	iommu_put_pages_list(&gather->freelist);
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
-- cgit v1.2.3

From 9e7ee0f045395dc8aa55fbdc164c062484f4c88d Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Wed, 24 Apr 2024 15:16:28 +0800
Subject: iommu/vt-d: Use try_cmpxchg64{,_local}() in iommu.c

Replace this pattern in iommu.c:

    cmpxchg64{,_local}(*ptr, 0, new) != 0

... with the simpler and faster:

    !try_cmpxchg64{,_local}(*ptr, &tmp, new)

The x86 CMPXCHG instruction returns success in the ZF flag, so this
change saves a compare after the CMPXCHG.

No functional change intended.
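As a hedged sketch of the two forms (using the names from the hunks
below): on failure, try_cmpxchg64() writes the value it observed back
into the expected-value variable, which is why the error path in
__domain_mapping() can print 'tmp' directly afterwards:

    u64 tmp = 0ULL;

    /* Old form: returns the prior *ptr; the caller compares it. */
    if (cmpxchg64(&pte->val, 0ULL, pteval))
    	;	/* lost the race */

    /* New form: returns a boolean; tmp now holds the winner's value. */
    if (!try_cmpxchg64(&pte->val, &tmp, pteval))
    	;	/* lost the race; tmp == current pte->val */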
Signed-off-by: Uros Bizjak
Cc: David Woodhouse
Cc: Lu Baolu
Cc: Joerg Roedel
Cc: Will Deacon
Cc: Robin Murphy
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20240414162454.49584-1-ubizjak@gmail.com
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a7ecd90303dc..4a2afe89b464 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -865,7 +865,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			break;
 
 		if (!dma_pte_present(pte)) {
-			uint64_t pteval;
+			uint64_t pteval, tmp;
 
 			tmp_page = alloc_pgtable_page(domain->nid, gfp);
 
@@ -877,7 +877,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			if (domain->use_first_level)
 				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
-			if (cmpxchg64(&pte->val, 0ULL, pteval))
+			tmp = 0ULL;
+			if (!try_cmpxchg64(&pte->val, &tmp, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
 				free_pgtable_page(tmp_page);
 			else
@@ -2128,8 +2129,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			/* We don't need lock here, nobody else
 			 * touches the iova range
 			 */
-			tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
-			if (tmp) {
+			tmp = 0ULL;
+			if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) {
 				static int dumps = 5;
 				pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
 					iov_pfn, tmp, (unsigned long long)pteval);
-- cgit v1.2.3

From 304b3bde24b58515a75fd198beb52ca57df6275f Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:32 +0800
Subject: iommu/vt-d: Remove caching mode check before device TLB flush

The Caching Mode (CM) of the Intel IOMMU indicates if the hardware
implementation caches not-present or erroneous translation-structure
entries except for the first-stage translation. The caching mode is
irrelevant to the device TLB, therefore there is no need to check it
before a device TLB invalidation operation.

Remove two caching mode checks before device TLB invalidation in the
driver. The removal of these checks doesn't change the driver's
behavior in critical map/unmap paths. Hence, there is no functionality
or performance impact, especially since commit <29b32839725f>
("iommu/vt-d: Do not use flush-queue when caching-mode is on") has
already disabled flush-queue for caching mode. Therefore, caching mode
will never call intel_flush_iotlb_all().

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Reviewed-by: Yi Liu
Link: https://lore.kernel.org/r/20240415013835.9527-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 4a2afe89b464..002fee5fcb80 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1502,11 +1502,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 	else
 		__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
 
-	/*
-	 * In caching mode, changes of pages from non-present to present require
-	 * flush. However, device IOTLB doesn't need to be flushed in this case.
-	 */
-	if (!cap_caching_mode(iommu->cap) || !map)
+	if (!map)
 		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
@@ -1580,8 +1576,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
 			iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						 DMA_TLB_DSI_FLUSH);
 
-		if (!cap_caching_mode(iommu->cap))
-			iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
+		iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
 	}
 
 	if (dmar_domain->nested_parent)
-- cgit v1.2.3

From 3b1d9e2b2d6856eabf5faa12d20c97fef657999f Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:33 +0800
Subject: iommu/vt-d: Add cache tag assignment interface

A caching tag is a combination of tags used by the hardware to cache
various translations. Whenever a mapping in a domain is changed, the
IOMMU driver should invalidate the caches with the caching tags. The
VT-d specification describes caching tags in section 6.2.1, Tagging
of Cached Translations.

Add an interface to assign caching tags to an IOMMU domain when it is
attached to a RID or PASID, and to unassign caching tags when a domain
is detached from a RID or PASID. All caching tags are listed in the
per-domain tag list and are protected by a dedicated lock.

In addition to the basic IOTLB and devTLB caching tag types,
NESTING_IOTLB and NESTING_DEVTLB tag types are also introduced. These
tags are used for caches that store translations for DMA accesses
through a nested user domain. They are affected by changes to mappings
in the parent domain.

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-2-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 002fee5fcb80..8220bb36e420 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1746,7 +1746,9 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
 	INIT_LIST_HEAD(&domain->dev_pasids);
+	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->lock);
+	spin_lock_init(&domain->cache_lock);
 	xa_init(&domain->iommu_array);
 
 	return domain;
@@ -1758,6 +1760,9 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	unsigned long ndomains;
 	int num, ret = -ENOSPC;
 
+	if (domain->domain.type == IOMMU_DOMAIN_SVA)
+		return 0;
+
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
@@ -1805,6 +1810,9 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 {
 	struct iommu_domain_info *info;
 
+	if (domain->domain.type == IOMMU_DOMAIN_SVA)
+		return;
+
 	spin_lock(&iommu->lock);
 	info = xa_load(&domain->iommu_array, iommu->seq_id);
 	if (--info->refcnt == 0) {
@@ -2323,6 +2331,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 	ret = domain_attach_iommu(domain, iommu);
 	if (ret)
 		return ret;
+
+	ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+	if (ret) {
+		domain_detach_iommu(domain, iommu);
+		return ret;
+	}
+
 	info->domain = domain;
 	spin_lock_irqsave(&domain->lock, flags);
 	list_add(&info->link, &domain->devices);
@@ -3811,6 +3826,7 @@ void device_block_translation(struct device *dev)
 	list_del(&info->link);
 	spin_unlock_irqrestore(&info->domain->lock, flags);
 
+	cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
 	domain_detach_iommu(info->domain, iommu);
 	info->domain = NULL;
 }
 
@@ -4598,6 +4614,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
 	if (WARN_ON_ONCE(!domain))
 		goto out_tear_down;
+	dmar_domain = to_dmar_domain(domain);
 
 	/*
 	 * The SVA implementation needs to handle its own stuffs like the mm
 	 * notification. Before consolidating that code into iommu core, let
@@ -4606,10 +4623,10 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	 */
 	if (domain->type == IOMMU_DOMAIN_SVA) {
 		intel_svm_remove_dev_pasid(dev, pasid);
+		cache_tag_unassign_domain(dmar_domain, dev, pasid);
 		goto out_tear_down;
 	}
 
-	dmar_domain = to_dmar_domain(domain);
 	spin_lock_irqsave(&dmar_domain->lock, flags);
 	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
 		if (curr->dev == dev && curr->pasid == pasid) {
@@ -4621,6 +4638,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	WARN_ON_ONCE(!dev_pasid);
 	spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
+	cache_tag_unassign_domain(dmar_domain, dev, pasid);
 	domain_detach_iommu(dmar_domain, iommu);
 	intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
 	kfree(dev_pasid);
@@ -4660,6 +4678,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
 	if (ret)
 		goto out_free;
 
+	ret = cache_tag_assign_domain(dmar_domain, dev, pasid);
+	if (ret)
+		goto out_detach_iommu;
+
 	if (domain_type_is_si(dmar_domain))
 		ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
 	else if (dmar_domain->use_first_level)
@@ -4669,7 +4691,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
 		ret = intel_pasid_setup_second_level(iommu, dmar_domain,
 						     dev, pasid);
 	if (ret)
-		goto out_detach_iommu;
+		goto out_unassign_tag;
 
 	dev_pasid->dev = dev;
 	dev_pasid->pasid = pasid;
@@ -4681,6 +4703,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
 	intel_iommu_debugfs_create_dev_pasid(dev_pasid);
 
 	return 0;
+out_unassign_tag:
+	cache_tag_unassign_domain(dmar_domain, dev, pasid);
 out_detach_iommu:
 	domain_detach_iommu(dmar_domain, iommu);
 out_free:
-- cgit v1.2.3

From c4d27ffaa8eb034ec438a9aedfe202ce81e15312 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:34 +0800
Subject: iommu/vt-d: Add cache tag invalidation helpers

Add several helpers to invalidate the caches after mappings in the
affected domain are changed; a usage sketch follows this list.

- cache_tag_flush_range() invalidates a range of caches after mappings
  within this range are changed. It uses the page-selective cache
  invalidation methods.

- cache_tag_flush_all() invalidates all caches tagged by a domain ID.
  It uses the domain-selective cache invalidation methods.

- cache_tag_flush_range_np() invalidates a range of caches when new
  mappings are created in the domain and the corresponding page table
  entries change from non-present to present.
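For illustration, a hedged sketch of how the helpers are used, with the
signatures taken from the call sites converted later in this series
(the surrounding context is assumed, not part of the patch):

    /* Unmap path: mappings were torn down, page-selective flush. */
    cache_tag_flush_range(dmar_domain, start, end,
    		      list_empty(&gather->freelist));

    /* Map path: non-present to present only, cheaper flush rules. */
    cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);

    /* Domain-wide: domain-selective flush of everything tagged. */
    cache_tag_flush_all(dmar_domain);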
Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-3-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8220bb36e420..473df7cd1672 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -54,11 +54,6 @@
 				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
 #define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN		(1)
-
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-
 static void __init check_tylersburg_isoch(void);
 static int rwbf_quirk;
 
@@ -1992,13 +1987,6 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 				      domain_context_mapping_cb, domain);
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* Return largest possible superpage level for a given mapping */
 static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
 				   unsigned long phy_pfn, unsigned long pages)
-- cgit v1.2.3

From 4e589a53685c7658f9055fcedbce15bd4cc41134 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:36 +0800
Subject: iommu/vt-d: Use cache_tag_flush_all() in flush_iotlb_all

The flush_iotlb_all callback is called by the iommu core to flush all
caches for the affected domain. Use cache_tag_flush_all() in this
callback.

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 20 +-------------------
 1 file changed, 1 insertion(+), 19 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 473df7cd1672..a268e2a51f4d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1557,25 +1557,7 @@ static void parent_domain_flush(struct dmar_domain *domain,
 
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	struct iommu_domain_info *info;
-	unsigned long idx;
-
-	xa_for_each(&dmar_domain->iommu_array, idx, info) {
-		struct intel_iommu *iommu = info->iommu;
-		u16 did = domain_id_iommu(dmar_domain, iommu);
-
-		if (dmar_domain->use_first_level)
-			domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
-		else
-			iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-
-		iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
-	}
-
-	if (dmar_domain->nested_parent)
-		parent_domain_flush(dmar_domain, 0, -1, 0);
+	cache_tag_flush_all(to_dmar_domain(domain));
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
-- cgit v1.2.3

From a600ccd0a347e8f9c9f35f229e1ccd0dca9374c4 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:37 +0800
Subject: iommu/vt-d: Use cache_tag_flush_range() in tlb_sync

The tlb_sync callback is called by the iommu core to flush a range of
caches for the affected domain. Use cache_tag_flush_range() in this
callback.
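For context, a hedged sketch of how the iommu core typically drives
this callback through the generic iotlb gather API (generic usage, not
part of this patch):

    struct iommu_iotlb_gather gather;

    iommu_iotlb_gather_init(&gather);
    iommu_unmap_fast(domain, iova, size, &gather); /* records start/end */
    iommu_iotlb_sync(domain, &gather);   /* lands in intel_iommu_tlb_sync() */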
Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-6-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 21 ++-------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a268e2a51f4d..f3926ad7d737 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4104,25 +4104,8 @@ static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
 static void intel_iommu_tlb_sync(struct iommu_domain *domain,
 				 struct iommu_iotlb_gather *gather)
 {
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long iova_pfn = IOVA_PFN(gather->start);
-	size_t size = gather->end - gather->start;
-	struct iommu_domain_info *info;
-	unsigned long start_pfn;
-	unsigned long nrpages;
-	unsigned long i;
-
-	nrpages = aligned_nrpages(gather->start, size);
-	start_pfn = mm_to_dma_pfn_start(iova_pfn);
-
-	xa_for_each(&dmar_domain->iommu_array, i, info)
-		iommu_flush_iotlb_psi(info->iommu, dmar_domain,
-				      start_pfn, nrpages,
-				      list_empty(&gather->freelist), 0);
-
-	if (dmar_domain->nested_parent)
-		parent_domain_flush(dmar_domain, start_pfn, nrpages,
-				    list_empty(&gather->freelist));
+	cache_tag_flush_range(to_dmar_domain(domain), gather->start,
+			      gather->end, list_empty(&gather->freelist));
 	put_pages_list(&gather->freelist);
 }
-- cgit v1.2.3

From 129dab6e1286525fe5baed860d3dfcd9c6b4b327 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:38 +0800
Subject: iommu/vt-d: Use cache_tag_flush_range_np() in iotlb_sync_map

The iotlb_sync_map callback is called by the iommu core after
non-present to present mappings are created. The iommu driver uses
this callback to invalidate caches if the IOMMU is working in caching
mode and second-only translation is used for the domain. Use
cache_tag_flush_range_np() in this callback.

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-7-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 22 +---------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f3926ad7d737..c9ac8a1b635f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1501,20 +1501,6 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 		iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
-/* Notification for newly created mappings */
-static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
-				 unsigned long pfn, unsigned int pages)
-{
-	/*
-	 * It's a non-present to present mapping. Only flush if caching mode
-	 * and second level.
-	 */
-	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
-		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
-}
-
 /*
  * Flush the relevant caches in nested translation if the domain
  * also serves as a parent
@@ -4544,14 +4530,8 @@ static bool risky_device(struct pci_dev *pdev)
 static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 				      unsigned long iova, size_t size)
 {
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	unsigned long pages = aligned_nrpages(iova, size);
-	unsigned long pfn = iova >> VTD_PAGE_SHIFT;
-	struct iommu_domain_info *info;
-	unsigned long i;
+	cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
 
-	xa_for_each(&dmar_domain->iommu_array, i, info)
-		__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
 	return 0;
 }
-- cgit v1.2.3

From 06792d06798931696dd78f65024de4cc66f2fd5b Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:39 +0800
Subject: iommu/vt-d: Cleanup use of iommu_flush_iotlb_psi()

Use cache_tag_flush_range() in switch_to_super_page() to invalidate
the necessary caches when switching mappings from normal to super
pages. The iommu_flush_iotlb_psi() call in
intel_iommu_memory_notifier() is unnecessary since there should be no
cache invalidation for the identity domain.

Clean up iommu_flush_iotlb_psi() after the last call site is removed.

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-8-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 171 +-------------------------------------------
 1 file changed, 2 insertions(+), 169 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c9ac8a1b635f..b2bd96e7f03d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1390,157 +1390,6 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
 }
 
-static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
-				  u64 addr, unsigned mask)
-{
-	struct dev_pasid_info *dev_pasid;
-	struct device_domain_info *info;
-	unsigned long flags;
-
-	if (!domain->has_iotlb_device)
-		return;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	list_for_each_entry(info, &domain->devices, link)
-		__iommu_flush_dev_iotlb(info, addr, mask);
-
-	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
-		info = dev_iommu_priv_get(dev_pasid->dev);
-
-		if (!info->ats_enabled)
-			continue;
-
-		qi_flush_dev_iotlb_pasid(info->iommu,
-					 PCI_DEVID(info->bus, info->devfn),
-					 info->pfsid, dev_pasid->pasid,
-					 info->ats_qdep, addr,
-					 mask);
-	}
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
-				     struct dmar_domain *domain, u64 addr,
-				     unsigned long npages, bool ih)
-{
-	u16 did = domain_id_iommu(domain, iommu);
-	struct dev_pasid_info *dev_pasid;
-	unsigned long flags;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
-		qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
-
-	if (!list_empty(&domain->devices))
-		qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				    unsigned long pfn, unsigned int pages,
-				    int ih)
-{
-	unsigned int aligned_pages = __roundup_pow_of_two(pages);
-	unsigned long bitmask = aligned_pages - 1;
-	unsigned int mask = ilog2(aligned_pages);
-	u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
-
-	/*
-	 * PSI masks the low order bits of the base address. If the
-	 * address isn't aligned to the mask, then compute a mask value
-	 * needed to ensure the target range is flushed.
-	 */
-	if (unlikely(bitmask & pfn)) {
-		unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-		/*
-		 * Since end_pfn <= pfn + bitmask, the only way bits
-		 * higher than bitmask can differ in pfn and end_pfn is
-		 * by carrying. This means after masking out bitmask,
-		 * high bits starting with the first set bit in
-		 * shared_bits are all equal in both pfn and end_pfn.
-		 */
-		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-		mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-	}
-
-	/*
-	 * Fallback to domain selective flush if no PSI support or
-	 * the size is too big.
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-					 DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-					 DMA_TLB_PSI_FLUSH);
-}
-
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
-				  struct dmar_domain *domain,
-				  unsigned long pfn, unsigned int pages,
-				  int ih, int map)
-{
-	unsigned int aligned_pages = __roundup_pow_of_two(pages);
-	unsigned int mask = ilog2(aligned_pages);
-	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
-	u16 did = domain_id_iommu(domain, iommu);
-
-	if (WARN_ON(!pages))
-		return;
-
-	if (ih)
-		ih = 1 << 6;
-
-	if (domain->use_first_level)
-		domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-	else
-		__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
-
-	if (!map)
-		iommu_flush_dev_iotlb(domain, addr, mask);
-}
-
-/*
- * Flush the relevant caches in nested translation if the domain
- * also serves as a parent
- */
-static void parent_domain_flush(struct dmar_domain *domain,
-				unsigned long pfn,
-				unsigned long pages, int ih)
-{
-	struct dmar_domain *s1_domain;
-
-	spin_lock(&domain->s1_lock);
-	list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
-		struct device_domain_info *device_info;
-		struct iommu_domain_info *info;
-		unsigned long flags;
-		unsigned long i;
-
-		xa_for_each(&s1_domain->iommu_array, i, info)
-			__iommu_flush_iotlb_psi(info->iommu, info->did,
-						pfn, pages, ih);
-
-		if (!s1_domain->has_iotlb_device)
-			continue;
-
-		spin_lock_irqsave(&s1_domain->lock, flags);
-		list_for_each_entry(device_info, &s1_domain->devices, link)
-			/*
-			 * Address translation cache in device side caches the
-			 * result of nested translation. There is no easy way
-			 * to identify the exact set of nested translations
-			 * affected by a change in S2. So just flush the entire
-			 * device cache.
-			 */
-			__iommu_flush_dev_iotlb(device_info, 0,
-						MAX_AGAW_PFN_WIDTH);
-		spin_unlock_irqrestore(&s1_domain->lock, flags);
-	}
-	spin_unlock(&domain->s1_lock);
-}
-
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
 	cache_tag_flush_all(to_dmar_domain(domain));
@@ -1991,9 +1840,7 @@ static void switch_to_super_page(struct dmar_domain *domain,
 				 unsigned long end_pfn, int level)
 {
 	unsigned long lvl_pages = lvl_to_nr_pages(level);
-	struct iommu_domain_info *info;
 	struct dma_pte *pte = NULL;
-	unsigned long i;
 
 	while (start_pfn <= end_pfn) {
 		if (!pte)
@@ -2005,13 +1852,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
 					       start_pfn + lvl_pages - 1,
 					       level + 1);
 
-			xa_for_each(&domain->iommu_array, i, info)
-				iommu_flush_iotlb_psi(info->iommu, domain,
-						      start_pfn, lvl_pages,
-						      0, 0);
-			if (domain->nested_parent)
-				parent_domain_flush(domain, start_pfn,
-						    lvl_pages, 0);
+			cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
+					      end_pfn << VTD_PAGE_SHIFT, 0);
 		}
 
 		pte++;
@@ -3381,18 +3223,9 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 	case MEM_OFFLINE:
 	case MEM_CANCEL_ONLINE:
 		{
-			struct dmar_drhd_unit *drhd;
-			struct intel_iommu *iommu;
 			LIST_HEAD(freelist);
 
 			domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
-
-			rcu_read_lock();
-			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain,
-						      start_vpfn, mhp->nr_pages,
-						      list_empty(&freelist), 0);
-			rcu_read_unlock();
 			put_pages_list(&freelist);
 		}
 		break;
-- cgit v1.2.3

From deda9a7bf38fb9846ff3fb83179ca07437b1511c Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:42 +0800
Subject: iommu/vt-d: Remove intel_svm_dev

The intel_svm_dev data structure used in the sva implementation for
the Intel IOMMU driver stores information about a device attached to
an SVA domain. It is a duplicate of dev_pasid_info that serves the
same purpose. Replace intel_svm_dev with dev_pasid_info and clean up
the use of intel_svm_dev.

Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-11-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b2bd96e7f03d..7631d00cc882 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4387,11 +4387,8 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	 * notification. Before consolidating that code into iommu core, let
 	 * the intel sva code handle it.
 	 */
-	if (domain->type == IOMMU_DOMAIN_SVA) {
-		intel_svm_remove_dev_pasid(dev, pasid);
-		cache_tag_unassign_domain(dmar_domain, dev, pasid);
-		goto out_tear_down;
-	}
+	if (domain->type == IOMMU_DOMAIN_SVA)
+		intel_svm_remove_dev_pasid(domain);
 
 	spin_lock_irqsave(&dmar_domain->lock, flags);
 	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
 		if (curr->dev == dev && curr->pasid == pasid) {
-- cgit v1.2.3

From 886f816c2f01f768384e78f91b3c0ff66920b53f Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 24 Apr 2024 15:16:44 +0800
Subject: iommu/vt-d: Remove struct intel_svm

The struct intel_svm was used for keeping attached-device info for an
SVA domain. Since an SVA domain is a kind of iommu_domain, struct
dmar_domain should centralize all info of an SVA domain, including the
info of attached devices. Therefore, retire struct intel_svm and clean
up the code.
Besides, register the mmu notifier callback in the domain_alloc_sva()
callback, which allows the memory management notifier lifetime to
follow the lifetime of the iommu_domain. Call mmu_notifier_put() in
the domain free path and defer the real free to the mmu_notifier
free_notifier callback.

Co-developed-by: Tina Zhang
Signed-off-by: Tina Zhang
Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240416080656.60968-13-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7631d00cc882..916cdb65d849 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3683,8 +3683,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 		return domain;
 	case IOMMU_DOMAIN_IDENTITY:
 		return &si_domain->domain;
-	case IOMMU_DOMAIN_SVA:
-		return intel_svm_domain_alloc();
 	default:
 		return NULL;
 	}
@@ -4382,14 +4380,6 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 		goto out_tear_down;
 	dmar_domain = to_dmar_domain(domain);
 
-	/*
-	 * The SVA implementation needs to handle its own stuffs like the mm
-	 * notification. Before consolidating that code into iommu core, let
-	 * the intel sva code handle it.
-	 */
-	if (domain->type == IOMMU_DOMAIN_SVA)
-		intel_svm_remove_dev_pasid(domain);
-
 	spin_lock_irqsave(&dmar_domain->lock, flags);
 	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
 		if (curr->dev == dev && curr->pasid == pasid) {
@@ -4624,6 +4614,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.hw_info = intel_iommu_hw_info,
 	.domain_alloc = intel_iommu_domain_alloc,
 	.domain_alloc_user = intel_iommu_domain_alloc_user,
+	.domain_alloc_sva = intel_svm_domain_alloc,
 	.probe_device = intel_iommu_probe_device,
 	.probe_finalize = intel_iommu_probe_finalize,
 	.release_device = intel_iommu_release_device,
-- cgit v1.2.3

From b67483b3c44eaef2f771fa4c712e13f452675a67 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Fri, 19 Apr 2024 17:54:45 +0100
Subject: iommu/dma: Centralise iommu_setup_dma_ops()

It's somewhat hard to see, but arm64's arch_setup_dma_ops() should
only ever call iommu_setup_dma_ops() after a successful
iommu_probe_device(), which means there should be no harm in achieving
the same order of operations by running it off the back of
iommu_probe_device() itself. This then puts it in line with the x86
and s390 .probe_finalize bodges, letting us pull it all into the main
flow properly. As a bonus this lets us fold in and de-scope the PCI
workaround setup as well.

At this point we can also then pull the call up inside the group
mutex, and avoid having to think about whether
iommu_group_store_type() could theoretically race and free the domain
if iommu_setup_dma_ops() ran just *before*
iommu_device_use_default_domain() claims it...

Furthermore we replace one .probe_finalize call completely, since the
only remaining implementations are now one which only needs to run
once for the initial boot-time probe, and two which themselves render
that path unreachable. This leaves us a big step closer to
realistically being able to unpick the variety of different things
that iommu_setup_dma_ops() has been muddling together, and further
streamline iommu-dma into core API flows in future.
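Conceptually, the call moves from each driver's .probe_finalize into
the core probe path, roughly like this hedged sketch (the exact
core-side placement and signature may differ; the argument form is
taken from the Intel call site removed below):

    /* iommu core, sketch: runs once per device, under the group mutex */
    static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
    {
    	/* ... driver probe, group setup ... */
    	iommu_setup_dma_ops(dev, 0, U64_MAX);	/* was per-driver .probe_finalize */
    	/* ... */
    	return 0;
    }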
 
Reviewed-by: Lu Baolu # For Intel IOMMU
Reviewed-by: Jason Gunthorpe
Tested-by: Hanjun Guo
Signed-off-by: Robin Murphy
Acked-by: Catalin Marinas
Link: https://lore.kernel.org/r/bebea331c1d688b34d9862eefd5ede47503961b8.1713523152.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 45c75a8a0ef5..64adfdadeb49 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4349,12 +4349,6 @@ static void intel_iommu_release_device(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-static void intel_iommu_probe_finalize(struct device *dev)
-{
-	set_dma_ops(dev, NULL);
-	iommu_setup_dma_ops(dev, 0, U64_MAX);
-}
-
 static void intel_iommu_get_resv_regions(struct device *device,
 					 struct list_head *head)
 {
@@ -4834,7 +4828,6 @@ const struct iommu_ops intel_iommu_ops = {
 	.domain_alloc = intel_iommu_domain_alloc,
 	.domain_alloc_user = intel_iommu_domain_alloc_user,
 	.probe_device = intel_iommu_probe_device,
-	.probe_finalize = intel_iommu_probe_finalize,
 	.release_device = intel_iommu_release_device,
-- cgit v1.2.3

From ba00196ca41c4f6d0b0d3c4a6748a133577abe05 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Fri, 3 May 2024 21:36:02 +0800
Subject: iommu/vt-d: Decouple igfx_off from graphic identity mapping

A kernel command line option called igfx_off was introduced in commit
("Intel IOMMU: Intel IOMMU driver"). This option allows the user to
disable the IOMMU dedicated to SOC-integrated graphic devices.

Commit <9452618e7462> ("iommu/intel: disable DMAR for g4x integrated
gfx") used this mechanism to disable the graphic-dedicated IOMMU for
some problematic devices. Later, more problematic graphic devices were
added to the list by commit <1f76249cc3beb> ("iommu/vt-d: Declare
Broadwell igfx dmar support snafu").

On the other hand, commit <19943b0e30b05> ("intel-iommu: Unify hardware
and software passthrough support") uses the identity domain for graphic
devices if CONFIG_DMAR_BROKEN_GFX_WA is selected.

+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
...
 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 {
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
...

In the following driver evolution, CONFIG_DMAR_BROKEN_GFX_WA and
quirk_iommu_igfx() are mixed together, causing confusion in the
driver's device_def_domain_type callback. On one hand, dmar_map_gfx is
used to turn off the graphic-dedicated IOMMU as a workaround for some
buggy hardware; on the other hand, for those graphic devices, IDENTITY
mapping is required for the IOMMU core.

Commit <4b8d18c0c986> ("iommu/vt-d: Remove INTEL_IOMMU_BROKEN_GFX_WA")
has removed the CONFIG_DMAR_BROKEN_GFX_WA option, so the
IDENTITY_DOMAIN requirement for graphic devices is no longer needed.
Therefore, this requirement can be removed from
device_def_domain_type() and igfx_off can be made independent.
Fixes: 4b8d18c0c986 ("iommu/vt-d: Remove INTEL_IOMMU_BROKEN_GFX_WA")
Signed-off-by: Lu Baolu
Reviewed-by: Kevin Tian
Link: https://lore.kernel.org/r/20240428032020.214616-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 19 ++++++------------
 1 file changed, 6 insertions(+), 13 deletions(-)

(limited to 'drivers/iommu/intel/iommu.c')

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 916cdb65d849..e8e62ff1a65e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -216,12 +216,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
-static int dmar_map_gfx = 1;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int iommu_skip_te_disable;
+static int disable_igfx_iommu;
 
-#define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
 const struct iommu_ops intel_iommu_ops;
@@ -260,7 +259,7 @@ static int __init intel_iommu_setup(char *str)
 			no_platform_optin = 1;
 			pr_info("IOMMU disabled\n");
 		} else if (!strncmp(str, "igfx_off", 8)) {
-			dmar_map_gfx = 0;
+			disable_igfx_iommu = 1;
 			pr_info("Disable GFX device mapping\n");
 		} else if (!strncmp(str, "forcedac", 8)) {
 			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
@@ -2211,9 +2210,6 @@ static int device_def_domain_type(struct device *dev)
 
 		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
 			return IOMMU_DOMAIN_IDENTITY;
-
-		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-			return IOMMU_DOMAIN_IDENTITY;
 	}
 
 	return 0;
@@ -2514,9 +2510,6 @@ static int __init init_dmars(void)
 		iommu_set_root_entry(iommu);
 	}
 
-	if (!dmar_map_gfx)
-		iommu_identity_mapping |= IDENTMAP_GFX;
-
 	check_tylersburg_isoch();
 
 	ret = si_domain_init(hw_pass_through);
@@ -2607,7 +2600,7 @@ static void __init init_no_remapping_devices(void)
 			/* This IOMMU has *only* gfx devices. Either bypass it or
 			   set the gfx_mapped flag, as appropriate */
 			drhd->gfx_dedicated = 1;
-			if (!dmar_map_gfx)
+			if (disable_igfx_iommu)
 				drhd->ignored = 1;
 		}
 	}
@@ -4649,7 +4642,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
 		return;
 
 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
-	dmar_map_gfx = 0;
+	disable_igfx_iommu = 1;
 }
 
 /* G4x/GM45 integrated gfx dmar support is totally busted. */
@@ -4730,8 +4723,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 
 	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
 		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
-		dmar_map_gfx = 0;
-	} else if (dmar_map_gfx) {
+		disable_igfx_iommu = 1;
+	} else if (!disable_igfx_iommu) {
 		/* we have to ensure the gfx device is idle before we flush */
 		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
 		iommu_set_dma_strict();
-- cgit v1.2.3
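The user-visible knob is unchanged by the rename above; disabling the
graphics-dedicated IOMMU still works via the existing boot parameter
handled in intel_iommu_setup():

    intel_iommu=igfx_off

which now sets disable_igfx_iommu = 1 instead of clearing dmar_map_gfx.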