| | | |
|---|---|---|
| author | Joerg Roedel <jroedel@suse.de> | 2024-04-26 12:54:13 +0200 |
| committer | Joerg Roedel <jroedel@suse.de> | 2024-04-26 12:54:13 +0200 |
| commit | 5dc72c8a146ddcfc568265c8088e90d8e996d340 (patch) | |
| tree | 0ec46a798323b9c64f896f71493b7d801b92e458 /drivers/iommu | |
| parent | a4eecd720546527d9c3091231b531e8ca18ba718 (diff) | |
| parent | 212c5c078d83d780cf2873ca931df135771e8bb7 (diff) | |
Merge branch 'memory-observability' into x86/amd
Diffstat (limited to 'drivers/iommu')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/iommu/amd/amd_iommu.h | 8 |
| -rw-r--r-- | drivers/iommu/amd/init.c | 86 |
| -rw-r--r-- | drivers/iommu/amd/io_pgtable.c | 13 |
| -rw-r--r-- | drivers/iommu/amd/io_pgtable_v2.c | 18 |
| -rw-r--r-- | drivers/iommu/amd/iommu.c | 11 |
| -rw-r--r-- | drivers/iommu/amd/ppr.c | 4 |
| -rw-r--r-- | drivers/iommu/dma-iommu.c | 7 |
| -rw-r--r-- | drivers/iommu/exynos-iommu.c | 14 |
| -rw-r--r-- | drivers/iommu/intel/dmar.c | 16 |
| -rw-r--r-- | drivers/iommu/intel/iommu.c | 47 |
| -rw-r--r-- | drivers/iommu/intel/iommu.h | 2 |
| -rw-r--r-- | drivers/iommu/intel/irq_remapping.c | 16 |
| -rw-r--r-- | drivers/iommu/intel/pasid.c | 18 |
| -rw-r--r-- | drivers/iommu/intel/svm.c | 11 |
| -rw-r--r-- | drivers/iommu/io-pgtable-arm.c | 15 |
| -rw-r--r-- | drivers/iommu/io-pgtable-dart.c | 37 |
| -rw-r--r-- | drivers/iommu/iommu-pages.h | 186 |
| -rw-r--r-- | drivers/iommu/rockchip-iommu.c | 14 |
| -rw-r--r-- | drivers/iommu/sun50i-iommu.c | 7 |
| -rw-r--r-- | drivers/iommu/tegra-smmu.c | 18 |
20 files changed, 350 insertions, 198 deletions
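
The conversion pattern is the same in every file below: raw page-allocator calls (`__get_free_pages()`, `alloc_pages_node()`, `get_zeroed_page()`, `free_pages()`, `put_pages_list()`) are replaced with wrappers from the new `drivers/iommu/iommu-pages.h`, the 186-line header this merge adds, so that IOMMU page-table memory can be observed and accounted. As a reading aid, here is a minimal sketch of the helpers, inferred only from their call sites in this diff: the signatures match their uses below, and since callers drop `__GFP_ZERO`, the helpers evidently zero internally. The memory-observability accounting that is the point of the series is deliberately omitted from this sketch.

```c
/*
 * Minimal sketch of the iommu-pages helpers used in the diff below.
 * Signatures are taken from the call sites in this merge; the bodies
 * reproduce only the obvious behavior (zeroed, optionally NUMA-aware
 * page allocations returning virtual addresses) and leave out the
 * accounting done by the real drivers/iommu/iommu-pages.h.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Allocate 2^order zeroed pages on a specific NUMA node. */
static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
{
	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

	return page ? page_address(page) : NULL;
}

/* Allocate 2^order zeroed pages with no node preference. */
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	return iommu_alloc_pages_node(NUMA_NO_NODE, gfp, order);
}

/* Single-page variants, used for page-table levels. */
static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
{
	return iommu_alloc_pages_node(nid, gfp, 0);
}

static inline void *iommu_alloc_page(gfp_t gfp)
{
	return iommu_alloc_pages(gfp, 0);
}

/* Free counterparts, taking the virtual address returned above. */
static inline void iommu_free_pages(void *virt, int order)
{
	if (virt)
		free_pages((unsigned long)virt, order);
}

static inline void iommu_free_page(void *virt)
{
	iommu_free_pages(virt, 0);
}
```

`iommu_put_pages_list()`, also used below, is the corresponding replacement for `put_pages_list()` on deferred-free page lists (e.g. flush-queue freelists); it is not sketched here.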
```diff
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 0005da7af739..dca120468198 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -162,14 +162,6 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
 	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
 
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
-	return page ? page_address(page) : NULL;
-}
-
 /*
  * This must be called after device probe completes. During probe
  * use rlookup_amd_iommu() get the iommu.
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7f9b5c710058..fd3e76e43699 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -36,6 +36,7 @@
 
 #include "amd_iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 /*
  * definitions for the ACPI scanning code
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-						      get_order(pci_seg->dev_table_size));
+	pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+					       get_order(pci_seg->dev_table_size));
 	if (!pci_seg->dev_table)
 		return -ENOMEM;
 
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->dev_table,
-		   get_order(pci_seg->dev_table_size));
+	iommu_free_pages(pci_seg->dev_table,
+			 get_order(pci_seg->dev_table_size));
 	pci_seg->dev_table = NULL;
 }
 
 /* Allocate per PCI segment IOMMU rlookup table. */
 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->rlookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+						   get_order(pci_seg->rlookup_table_size));
 	if (pci_seg->rlookup_table == NULL)
 		return -ENOMEM;
 
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->rlookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->rlookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->rlookup_table = NULL;
 }
 
 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->irq_lookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(pci_seg->rlookup_table_size));
 	kmemleak_alloc(pci_seg->irq_lookup_table,
 		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
 	if (pci_seg->irq_lookup_table == NULL)
@@ -699,8 +698,8 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	kmemleak_free(pci_seg->irq_lookup_table);
-	free_pages((unsigned long)pci_seg->irq_lookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->irq_lookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->irq_lookup_table = NULL;
 }
 
@@ -708,8 +707,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	int i;
 
-	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
-					get_order(pci_seg->alias_table_size));
+	pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+						 get_order(pci_seg->alias_table_size));
 	if (!pci_seg->alias_table)
 		return -ENOMEM;
 
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 
 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->alias_table,
-		   get_order(pci_seg->alias_table_size));
+	iommu_free_pages(pci_seg->alias_table,
+			 get_order(pci_seg->alias_table_size));
 	pci_seg->alias_table = NULL;
 }
 
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(CMD_BUFFER_SIZE));
+	iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+					   get_order(CMD_BUFFER_SIZE));
 
 	return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -834,19 +833,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+	iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 				  size_t size)
 {
 	int order = get_order(size);
-	void *buf = (void *)__get_free_pages(gfp, order);
+	void *buf = iommu_alloc_pages(gfp, order);
 
 	if (buf &&
 	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
-		free_pages((unsigned long)buf, order);
+		iommu_free_pages(buf, order);
 		buf = NULL;
 	}
 
@@ -856,7 +855,7 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
 					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
@@ -890,14 +889,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+	iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
-	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+	iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+	iommu_free_pages(iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -942,13 +941,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
-	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(GA_LOG_SIZE));
+	iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
 	if (!iommu->ga_log)
 		goto err_out;
 
-	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(8));
+	iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
@@ -961,7 +958,7 @@ err_out:
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
@@ -969,7 +966,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
 	if (iommu->cmd_sem)
-		free_page((unsigned long)iommu->cmd_sem);
+		iommu_free_page((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1034,7 +1031,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	u32 lo, hi, devid, old_devtb_size;
 	phys_addr_t old_devtb_phys;
 	u16 dom_id, dte_v, irq_v;
-	gfp_t gfp_flag;
 	u64 tmp;
 
 	/* Each IOMMU use separate device table with the same size */
@@ -1068,9 +1064,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	if (!old_devtb)
 		return false;
 
-	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-						get_order(pci_seg->dev_table_size));
+	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+						     get_order(pci_seg->dev_table_size));
 	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
@@ -2769,8 +2764,8 @@ static void early_enable_iommus(void)
 
 		for_each_pci_segment(pci_seg) {
 			if (pci_seg->old_dev_tbl_cpy != NULL) {
-				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-						get_order(pci_seg->dev_table_size));
+				iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+						 get_order(pci_seg->dev_table_size));
 				pci_seg->old_dev_tbl_cpy = NULL;
 			}
 		}
@@ -2783,8 +2778,8 @@ static void early_enable_iommus(void)
 		pr_info("Copied DEV table from previous kernel.\n");
 
 		for_each_pci_segment(pci_seg) {
-			free_pages((unsigned long)pci_seg->dev_table,
-				   get_order(pci_seg->dev_table_size));
+			iommu_free_pages(pci_seg->dev_table,
+					 get_order(pci_seg->dev_table_size));
 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
 		}
 
@@ -2989,8 +2984,8 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID/8));
+	iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+			 get_order(MAX_DOMAIN_ID / 8));
 	amd_iommu_pd_alloc_bitmap = NULL;
 
 	free_unity_maps();
@@ -3062,9 +3057,8 @@ static int __init early_amd_iommu_init(void)
 	/* Device table - directly used by all IOMMUs */
 	ret = -ENOMEM;
 
-	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-					    GFP_KERNEL | __GFP_ZERO,
-					    get_order(MAX_DOMAIN_ID/8));
+	amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(MAX_DOMAIN_ID / 8));
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto out;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 2a0d1e97e52f..9d9a7fde59e7 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -22,6 +22,7 @@
 
 #include "amd_iommu_types.h"
 #include "amd_iommu.h"
+#include "../iommu-pages.h"
 
 static void v1_tlb_flush_all(void *cookie)
 {
@@ -156,7 +157,7 @@ static bool increase_address_space(struct protection_domain *domain,
 	bool ret = true;
 	u64 *pte;
 
-	pte = alloc_pgtable_page(domain->nid, gfp);
+	pte = iommu_alloc_page_node(domain->nid, gfp);
 	if (!pte)
 		return false;
 
@@ -187,7 +188,7 @@ static bool increase_address_space(struct protection_domain *domain,
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	free_page((unsigned long)pte);
+	iommu_free_page(pte);
 
 	return ret;
 }
@@ -250,7 +251,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
 		if (!IOMMU_PTE_PRESENT(__pte) ||
 		    pte_level == PAGE_MODE_NONE) {
-			page = alloc_pgtable_page(domain->nid, gfp);
+			page = iommu_alloc_page_node(domain->nid, gfp);
 
 			if (!page)
 				return NULL;
@@ -259,7 +260,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
 			/* pte could have been changed somewhere. */
 			if (!try_cmpxchg64(pte, &__pte, __npte))
-				free_page((unsigned long)page);
+				iommu_free_page(page);
 			else if (IOMMU_PTE_PRESENT(__pte))
 				*updated = true;
 
@@ -431,7 +432,7 @@ out:
 	}
 
 	/* Everything flushed out, free pages now */
-	put_pages_list(&freelist);
+	iommu_put_pages_list(&freelist);
 
 	return ret;
 }
@@ -580,7 +581,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(dom);
 
-	put_pages_list(&freelist);
+	iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 93489d2db4e8..78ac37c5ccc1 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -18,6 +18,7 @@
 
 #include "amd_iommu_types.h"
 #include "amd_iommu.h"
+#include "../iommu-pages.h"
 
 #define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
 #define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
@@ -99,11 +100,6 @@ static inline int page_size_to_level(u64 pg_size)
 	return PAGE_MODE_1_LEVEL;
 }
 
-static inline void free_pgtable_page(u64 *pt)
-{
-	free_page((unsigned long)pt);
-}
-
 static void free_pgtable(u64 *pt, int level)
 {
 	u64 *p;
@@ -125,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
 		if (level > 2)
 			free_pgtable(p, level - 1);
 		else
-			free_pgtable_page(p);
+			iommu_free_page(p);
 	}
 
-	free_pgtable_page(pt);
+	iommu_free_page(pt);
 }
 
 /* Allocate page table */
@@ -156,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 		}
 
 		if (!IOMMU_PTE_PRESENT(__pte)) {
-			page = alloc_pgtable_page(nid, gfp);
+			page = iommu_alloc_page_node(nid, gfp);
 			if (!page)
 				return NULL;
 
 			__npte = set_pgtable_attr(page);
 			/* pte could have been changed somewhere. */
 			if (cmpxchg64(pte, __pte, __npte) != __pte)
-				free_pgtable_page(page);
+				iommu_free_page(page);
 			else if (IOMMU_PTE_PRESENT(__pte))
 				*updated = true;
 
@@ -185,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 		if (pg_size == IOMMU_PAGE_SIZE_1G)
 			free_pgtable(__pte, end_level - 1);
 		else if (pg_size == IOMMU_PAGE_SIZE_2M)
-			free_pgtable_page(__pte);
+			iommu_free_page(__pte);
 	}
 
 	return pte;
@@ -366,7 +362,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	struct protection_domain *pdom = (struct protection_domain *)cookie;
 	int ias = IOMMU_IN_ADDR_BIT_SIZE;
 
-	pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
+	pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC);
 	if (!pgtable->pgd)
 		return NULL;
 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 52a04a6db397..1ff80cc79dbc 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -42,6 +42,7 @@
 #include "amd_iommu.h"
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -1686,7 +1687,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
 
 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
-		free_page((unsigned long)ptr);
+		iommu_free_page(ptr);
 	}
 }
 
@@ -1719,7 +1720,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
 	/* Free per device domain ID */
 	domain_id_free(gcr3_info->domid);
 
-	free_page((unsigned long)gcr3_info->gcr3_tbl);
+	iommu_free_page(gcr3_info->gcr3_tbl);
 	gcr3_info->gcr3_tbl = NULL;
 }
 
@@ -1754,7 +1755,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
 	/* Allocate per device domain ID */
 	gcr3_info->domid = domain_id_alloc();
 
-	gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+	gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
 	if (gcr3_info->gcr3_tbl == NULL) {
 		domain_id_free(gcr3_info->domid);
 		return -ENOMEM;
@@ -2289,7 +2290,7 @@ void protection_domain_free(struct protection_domain *domain)
 		free_io_pgtable_ops(&domain->iop.iop.ops);
 
 	if (domain->iop.root)
-		free_page((unsigned long)domain->iop.root);
+		iommu_free_page(domain->iop.root);
 
 	if (domain->id)
 		domain_id_free(domain->id);
@@ -2304,7 +2305,7 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
 
 	if (mode != PAGE_MODE_NONE) {
-		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+		pt_root = iommu_alloc_page(GFP_KERNEL);
 		if (!pt_root)
 			return -ENOMEM;
 	}
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 0463daa2d46b..091423bb8aac 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -15,6 +15,8 @@
 #include "amd_iommu.h"
 #include "amd_iommu_types.h"
 
+#include "../iommu-pages.h"
+
 int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
 {
 	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
@@ -46,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
 
 void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+	iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
 }
 
 /*
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e4cb26f6a943..16a7c4a4f3db 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -32,6 +32,7 @@
 #include <trace/events/swiotlb.h>
 
 #include "dma-iommu.h"
+#include "iommu-pages.h"
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -156,7 +157,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
 		if (fq->entries[idx].counter >= counter)
 			break;
 
-		put_pages_list(&fq->entries[idx].freelist);
+		iommu_put_pages_list(&fq->entries[idx].freelist);
 		free_iova_fast(&cookie->iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
@@ -254,7 +255,7 @@ static void iommu_dma_free_fq_single(struct iova_fq *fq)
 	int idx;
 
 	fq_ring_for_each(idx, fq)
-		put_pages_list(&fq->entries[idx].freelist);
+		iommu_put_pages_list(&fq->entries[idx].freelist);
 	vfree(fq);
 }
 
@@ -267,7 +268,7 @@ static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
 		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
 
 		fq_ring_for_each(idx, fq)
-			put_pages_list(&fq->entries[idx].freelist);
+			iommu_put_pages_list(&fq->entries[idx].freelist);
 	}
 
 	free_percpu(percpu_fq);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index d98c9161948a..c666ecab955d 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -22,6 +22,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
+#include "iommu-pages.h"
+
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 static struct iommu_domain exynos_identity_domain;
@@ -900,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
 	if (!domain)
 		return NULL;
 
-	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+	domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
 	if (!domain->pgtable)
 		goto err_pgtable;
 
-	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+	domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
 	if (!domain->lv2entcnt)
 		goto err_counter;
 
@@ -930,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
 	return &domain->domain;
 
 err_lv2ent:
-	free_pages((unsigned long)domain->lv2entcnt, 1);
+	iommu_free_pages(domain->lv2entcnt, 1);
 err_counter:
-	free_pages((unsigned long)domain->pgtable, 2);
+	iommu_free_pages(domain->pgtable, 2);
 err_pgtable:
 	kfree(domain);
 	return NULL;
@@ -973,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 					phys_to_virt(base));
 	}
 
-	free_pages((unsigned long)domain->pgtable, 2);
-	free_pages((unsigned long)domain->lv2entcnt, 1);
+	iommu_free_pages(domain->pgtable, 2);
+	iommu_free_pages(domain->lv2entcnt, 1);
 	kfree(domain);
 }
 
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 36d7427b1202..87ad996e5257 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -32,6 +32,7 @@
 
 #include "iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 #include "perf.h"
 #include "trace.h"
 #include "perfmon.h"
@@ -1187,7 +1188,7 @@ static void free_iommu(struct intel_iommu *iommu)
 	}
 
 	if (iommu->qi) {
-		free_page((unsigned long)iommu->qi->desc);
+		iommu_free_page(iommu->qi->desc);
 		kfree(iommu->qi->desc_status);
 		kfree(iommu->qi);
 	}
@@ -1755,7 +1756,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
 	struct q_inval *qi;
-	struct page *desc_page;
+	void *desc;
+	int order;
 
 	if (!ecap_qis(iommu->ecap))
 		return -ENOENT;
@@ -1776,19 +1778,19 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	 * Need two pages to accommodate 256 descriptors of 256 bits each
 	 * if the remapping hardware supports scalable mode translation.
 	 */
-	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
-				     !!ecap_smts(iommu->ecap));
-	if (!desc_page) {
+	order = ecap_smts(iommu->ecap) ? 1 : 0;
+	desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+	if (!desc) {
 		kfree(qi);
 		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
-	qi->desc = page_address(desc_page);
+	qi->desc = desc;
 
 	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
 	if (!qi->desc_status) {
-		free_page((unsigned long) qi->desc);
+		iommu_free_page(qi->desc);
 		kfree(qi);
 		iommu->qi = NULL;
 		return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a7ecd90303dc..daaa7a22595e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,6 +27,7 @@
 #include "iommu.h"
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 #include "pasid.h"
 #include "cap_audit.h"
 #include "perfmon.h"
@@ -298,22 +299,6 @@ static int __init intel_iommu_setup(char *str)
 }
 __setup("intel_iommu=", intel_iommu_setup);
 
-void *alloc_pgtable_page(int node, gfp_t gfp)
-{
-	struct page *page;
-	void *vaddr = NULL;
-
-	page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
-	if (page)
-		vaddr = page_address(page);
-	return vaddr;
-}
-
-void free_pgtable_page(void *vaddr)
-{
-	free_page((unsigned long)vaddr);
-}
-
 static int domain_type_is_si(struct dmar_domain *domain)
 {
 	return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -545,7 +530,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 		if (!alloc)
 			return NULL;
 
-		context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+		context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
 		if (!context)
 			return NULL;
 
@@ -719,17 +704,17 @@ static void free_context_table(struct intel_iommu *iommu)
 	for (i = 0; i < ROOT_ENTRY_NR; i++) {
 		context = iommu_context_addr(iommu, i, 0, 0);
 		if (context)
-			free_pgtable_page(context);
+			iommu_free_page(context);
 
 		if (!sm_supported(iommu))
 			continue;
 
 		context = iommu_context_addr(iommu, i, 0x80, 0);
 		if (context)
-			free_pgtable_page(context);
+			iommu_free_page(context);
 	}
 
-	free_pgtable_page(iommu->root_entry);
+	iommu_free_page(iommu->root_entry);
 	iommu->root_entry = NULL;
 }
 
@@ -867,7 +852,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
 
-			tmp_page = alloc_pgtable_page(domain->nid, gfp);
+			tmp_page = iommu_alloc_page_node(domain->nid, gfp);
 
 			if (!tmp_page)
 				return NULL;
@@ -879,7 +864,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
-				free_pgtable_page(tmp_page);
+				iommu_free_page(tmp_page);
 			else
 				domain_flush_cache(domain, pte, sizeof(*pte));
 		}
@@ -992,7 +977,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
 		      last_pfn < level_pfn + level_size(level) - 1)) {
 			dma_clear_pte(pte);
 			domain_flush_cache(domain, pte, sizeof(*pte));
-			free_pgtable_page(level_pte);
+			iommu_free_page(level_pte);
 		}
 next:
 		pfn += level_size(level);
@@ -1016,7 +1001,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-		free_pgtable_page(domain->pgd);
+		iommu_free_page(domain->pgd);
 		domain->pgd = NULL;
 	}
 }
@@ -1118,7 +1103,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
 
-	root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+	root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 			iommu->name);
@@ -1841,7 +1826,7 @@ static void domain_exit(struct dmar_domain *domain)
 		LIST_HEAD(freelist);
 
 		domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
-		put_pages_list(&freelist);
+		iommu_put_pages_list(&freelist);
 	}
 
 	if (WARN_ON(!list_empty(&domain->devices)))
@@ -2497,7 +2482,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 			if (!old_ce)
 				goto out;
 
-			new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
+			new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
 			if (!new_ce)
 				goto out_unmap;
 
@@ -3426,7 +3411,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 					start_vpfn, mhp->nr_pages,
 					list_empty(&freelist), 0);
 			rcu_read_unlock();
-			put_pages_list(&freelist);
+			iommu_put_pages_list(&freelist);
 		}
 		break;
 	}
@@ -3833,7 +3818,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
-	domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -3987,7 +3972,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
 		pte = dmar_domain->pgd;
 		if (dma_pte_present(pte)) {
 			dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
-			free_pgtable_page(pte);
+			iommu_free_page(pte);
 		}
 		dmar_domain->agaw--;
 	}
@@ -4141,7 +4126,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
 	if (dmar_domain->nested_parent)
 		parent_domain_flush(dmar_domain, start_pfn, nrpages,
 				    list_empty(&gather->freelist));
-	put_pages_list(&gather->freelist);
+	iommu_put_pages_list(&gather->freelist);
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 404d2476a877..8d081d8c6f41 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1085,8 +1085,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain);
 
 int dmar_ir_support(void);
 
-void *alloc_pgtable_page(int node, gfp_t gfp);
-void free_pgtable_page(void *vaddr);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
 					       const struct iommu_user_data *user_data);
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 566297bc87dd..39cd9626eb8d 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -22,6 +22,7 @@
 
 #include "iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 #include "cap_audit.h"
 
 enum irq_mode {
@@ -527,7 +528,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	struct ir_table *ir_table;
 	struct fwnode_handle *fn;
 	unsigned long *bitmap;
-	struct page *pages;
+	void *ir_table_base;
 
 	if (iommu->ir_table)
 		return 0;
@@ -536,9 +537,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	if (!ir_table)
 		return -ENOMEM;
 
-	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
-				 INTR_REMAP_PAGE_ORDER);
-	if (!pages) {
+	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
+					       INTR_REMAP_PAGE_ORDER);
+	if (!ir_table_base) {
 		pr_err("IR%d: failed to allocate pages of order %d\n",
 		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		goto out_fre
```
