| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-16 11:31:55 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-16 11:31:55 -0700 |
| commit | c4cf498dc0241fa2d758dba177634268446afb06 | |
| tree | 77e86dd4f211c6681ff9fbab481295732dc23422 /mm | |
| parent | 9ff9b0d392ea08090cd1780fb196f36dbb586529 | |
| parent | 4d0e9df5e43dba52d38b251e3b909df8fa1110be | |
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
"155 patches.
Subsystems affected by this patch series: mm (dax, debug, thp,
readahead, page-poison, util, memory-hotplug, zram, cleanups), misc,
core-kernel, get_maintainer, MAINTAINERS, lib, bitops, checkpatch,
binfmt, ramfs, autofs, nilfs, rapidio, panic, relay, kgdb, ubsan,
romfs, and fault-injection"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (155 commits)
lib, uaccess: add failure injection to usercopy functions
lib, include/linux: add usercopy failure capability
ROMFS: support inode blocks calculation
ubsan: introduce CONFIG_UBSAN_LOCAL_BOUNDS for Clang
sched.h: drop in_ubsan field when UBSAN is in trap mode
scripts/gdb/tasks: add headers and improve spacing format
scripts/gdb/proc: add struct mount & struct super_block addr in lx-mounts command
kernel/relay.c: drop unneeded initialization
panic: dump registers on panic_on_warn
rapidio: fix the missed put_device() for rio_mport_add_riodev
rapidio: fix error handling path
nilfs2: fix some kernel-doc warnings for nilfs2
autofs: harden ioctl table
ramfs: fix nommu mmap with gaps in the page cache
mm: remove the now-unnecessary mmget_still_valid() hack
mm/gup: take mmap_lock in get_dump_page()
binfmt_elf, binfmt_elf_fdpic: use a VMA list snapshot
coredump: rework elf/elf_fdpic vma_dump_size() into common helper
coredump: refactor page range dumping into common helper
coredump: let dump_emit() bail out on short writes
...
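Among the patches listed above, "mm/gup: take mmap_lock in get_dump_page()" changes that helper's contract: it is now called without mmap_lock held, takes and releases the lock internally, and returns either a page pinned for the dump (to be released with put_page()) or NULL, in which case a hole is left in the core file. Below is a minimal sketch of a dump loop written against that contract; dump_range(), emit_page() and emit_hole() are illustrative stand-ins, not the kernel's actual binfmt_elf/coredump code.

```c
#include <linux/mm.h>

/*
 * Hypothetical helper: walk a user address range page by page and hand
 * each page (or a hole) to a core-dump writer.  get_dump_page() is
 * called without mmap_lock held; after the rework merged here it takes
 * and drops the lock internally.
 */
static int dump_range(unsigned long start, unsigned long end,
		      int (*emit_page)(struct page *page),
		      int (*emit_hole)(void))
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);
		int err;

		if (page) {
			err = emit_page(page);	/* write the page's contents */
			put_page(page);		/* drop the pin taken for us */
		} else {
			err = emit_hole();	/* ZERO_PAGE, pte_none, or failure */
		}
		if (err)
			return err;
	}
	return 0;
}
```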
Diffstat (limited to 'mm')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/Kconfig | 2 |
| -rw-r--r-- | mm/compaction.c | 6 |
| -rw-r--r-- | mm/debug_vm_pgtable.c | 207 |
| -rw-r--r-- | mm/filemap.c | 58 |
| -rw-r--r-- | mm/gup.c | 61 |
| -rw-r--r-- | mm/highmem.c | 4 |
| -rw-r--r-- | mm/huge_memory.c | 45 |
| -rw-r--r-- | mm/hwpoison-inject.c | 18 |
| -rw-r--r-- | mm/internal.h | 27 |
| -rw-r--r-- | mm/khugepaged.c | 2 |
| -rw-r--r-- | mm/madvise.c | 52 |
| -rw-r--r-- | mm/memory-failure.c | 311 |
| -rw-r--r-- | mm/memory.c | 7 |
| -rw-r--r-- | mm/memory_hotplug.c | 211 |
| -rw-r--r-- | mm/memremap.c | 3 |
| -rw-r--r-- | mm/migrate.c | 11 |
| -rw-r--r-- | mm/mmap.c | 7 |
| -rw-r--r-- | mm/mmu_notifier.c | 2 |
| -rw-r--r-- | mm/page-writeback.c | 1 |
| -rw-r--r-- | mm/page_alloc.c | 241 |
| -rw-r--r-- | mm/page_isolation.c | 16 |
| -rw-r--r-- | mm/page_owner.c | 10 |
| -rw-r--r-- | mm/page_poison.c | 20 |
| -rw-r--r-- | mm/page_reporting.c | 4 |
| -rw-r--r-- | mm/readahead.c | 130 |
| -rw-r--r-- | mm/rmap.c | 10 |
| -rw-r--r-- | mm/shmem.c | 2 |
| -rw-r--r-- | mm/shuffle.c | 2 |
| -rw-r--r-- | mm/slab.c | 2 |
| -rw-r--r-- | mm/slab.h | 1 |
| -rw-r--r-- | mm/slub.c | 2 |
| -rw-r--r-- | mm/sparse.c | 2 |
| -rw-r--r-- | mm/swap_state.c | 2 |
| -rw-r--r-- | mm/truncate.c | 6 |
| -rw-r--r-- | mm/util.c | 3 |
| -rw-r--r-- | mm/vmscan.c | 5 |
| -rw-r--r-- | mm/vmstat.c | 8 |
| -rw-r--r-- | mm/workingset.c | 2 |
38 files changed, 735 insertions, 768 deletions
diff --git a/mm/Kconfig b/mm/Kconfig index e72e61c1d62e..c7f30f8b282b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -152,6 +152,7 @@ config HAVE_BOOTMEM_INFO_NODE # eventually, we can have this option just 'select SPARSEMEM' config MEMORY_HOTPLUG bool "Allow for memory hot-add" + select MEMORY_ISOLATION depends on SPARSEMEM || X86_64_ACPI_NUMA depends on ARCH_ENABLE_MEMORY_HOTPLUG depends on 64BIT || BROKEN @@ -178,7 +179,6 @@ config MEMORY_HOTPLUG_DEFAULT_ONLINE config MEMORY_HOTREMOVE bool "Allow for memory hot remove" - select MEMORY_ISOLATION select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64) depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE depends on MIGRATION diff --git a/mm/compaction.c b/mm/compaction.c index 6c63844fc061..6e0ee5641788 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -625,7 +625,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, } /* Found a free page, will break it into order-0 pages */ - order = page_order(page); + order = buddy_order(page); isolated = __isolate_free_page(page, order); if (!isolated) break; @@ -898,7 +898,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * potential isolation targets. */ if (PageBuddy(page)) { - unsigned long freepage_order = page_order_unsafe(page); + unsigned long freepage_order = buddy_order_unsafe(page); /* * Without lock, we cannot be sure that what we got is @@ -1172,7 +1172,7 @@ static bool suitable_migration_target(struct compact_control *cc, * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. */ - if (page_order_unsafe(page) >= pageblock_order) + if (buddy_order_unsafe(page) >= pageblock_order) return false; } diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index 086309fb9b6f..c05d9dcf7891 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -28,6 +28,7 @@ #include <linux/swapops.h> #include <linux/start_kernel.h> #include <linux/sched/mm.h> +#include <linux/io.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> @@ -44,10 +45,17 @@ * entry type. But these bits might affect the ability to clear entries with * pxx_clear() because of how dynamic page table folding works on s390. So * while loading up the entries do not change the lower 4 bits. It does not - * have affect any other platform. + * have affect any other platform. Also avoid the 62nd bit on ppc64 that is + * used to mark a pte entry. */ -#define S390_MASK_BITS 4 -#define RANDOM_ORVALUE GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS) +#define S390_SKIP_MASK GENMASK(3, 0) +#if __BITS_PER_LONG == 64 +#define PPC64_SKIP_MASK GENMASK(62, 62) +#else +#define PPC64_SKIP_MASK 0x0 +#endif +#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK) +#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK) #define RANDOM_NZVALUE GENMASK(7, 0) static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot) @@ -71,15 +79,18 @@ static void __init pte_advanced_tests(struct mm_struct *mm, { pte_t pte = pfn_pte(pfn, prot); + /* + * Architectures optimize set_pte_at by avoiding TLB flush. + * This requires set_pte_at to be not used to update an + * existing pte entry. 
Clear pte before we do set_pte_at + */ + pr_debug("Validating PTE advanced\n"); pte = pfn_pte(pfn, prot); set_pte_at(mm, vaddr, ptep, pte); ptep_set_wrprotect(mm, vaddr, ptep); pte = ptep_get(ptep); WARN_ON(pte_write(pte)); - - pte = pfn_pte(pfn, prot); - set_pte_at(mm, vaddr, ptep, pte); ptep_get_and_clear(mm, vaddr, ptep); pte = ptep_get(ptep); WARN_ON(!pte_none(pte)); @@ -93,13 +104,11 @@ static void __init pte_advanced_tests(struct mm_struct *mm, ptep_set_access_flags(vma, vaddr, ptep, pte, 1); pte = ptep_get(ptep); WARN_ON(!(pte_write(pte) && pte_dirty(pte))); - - pte = pfn_pte(pfn, prot); - set_pte_at(mm, vaddr, ptep, pte); ptep_get_and_clear_full(mm, vaddr, ptep, 1); pte = ptep_get(ptep); WARN_ON(!pte_none(pte)); + pte = pfn_pte(pfn, prot); pte = pte_mkyoung(pte); set_pte_at(mm, vaddr, ptep, pte); ptep_test_and_clear_young(vma, vaddr, ptep); @@ -111,10 +120,14 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot) { pte_t pte = pfn_pte(pfn, prot); + if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) + return; + pr_debug("Validating PTE saved write\n"); WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte)))); WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte)))); } + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { @@ -141,7 +154,7 @@ static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) static void __init pmd_advanced_tests(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmdp, unsigned long pfn, unsigned long vaddr, - pgprot_t prot) + pgprot_t prot, pgtable_t pgtable) { pmd_t pmd = pfn_pmd(pfn, prot); @@ -152,14 +165,13 @@ static void __init pmd_advanced_tests(struct mm_struct *mm, /* Align the address wrt HPAGE_PMD_SIZE */ vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE; + pgtable_trans_huge_deposit(mm, pmdp, pgtable); + pmd = pfn_pmd(pfn, prot); set_pmd_at(mm, vaddr, pmdp, pmd); pmdp_set_wrprotect(mm, vaddr, pmdp); pmd = READ_ONCE(*pmdp); WARN_ON(pmd_write(pmd)); - - pmd = pfn_pmd(pfn, prot); - set_pmd_at(mm, vaddr, pmdp, pmd); pmdp_huge_get_and_clear(mm, vaddr, pmdp); pmd = READ_ONCE(*pmdp); WARN_ON(!pmd_none(pmd)); @@ -173,18 +185,20 @@ static void __init pmd_advanced_tests(struct mm_struct *mm, pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1); pmd = READ_ONCE(*pmdp); WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd))); - - pmd = pmd_mkhuge(pfn_pmd(pfn, prot)); - set_pmd_at(mm, vaddr, pmdp, pmd); pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1); pmd = READ_ONCE(*pmdp); WARN_ON(!pmd_none(pmd)); + pmd = pmd_mkhuge(pfn_pmd(pfn, prot)); pmd = pmd_mkyoung(pmd); set_pmd_at(mm, vaddr, pmdp, pmd); pmdp_test_and_clear_young(vma, vaddr, pmdp); pmd = READ_ONCE(*pmdp); WARN_ON(pmd_young(pmd)); + + /* Clear the pte entries */ + pmdp_huge_get_and_clear(mm, vaddr, pmdp); + pgtable = pgtable_trans_huge_withdraw(mm, pmdp); } static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) @@ -199,11 +213,12 @@ static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) WARN_ON(!pmd_leaf(pmd)); } +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { pmd_t pmd; - if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + if (!arch_ioremap_pmd_supported()) return; pr_debug("Validating PMD huge\n"); @@ -217,11 +232,17 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) pmd = READ_ONCE(*pmdp); WARN_ON(!pmd_none(pmd)); } +#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +static void __init 
pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { } +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { pmd_t pmd = pfn_pmd(pfn, prot); + if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) + return; + pr_debug("Validating PMD saved write\n"); WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd)))); WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd)))); @@ -272,17 +293,9 @@ static void __init pud_advanced_tests(struct mm_struct *mm, WARN_ON(pud_write(pud)); #ifndef __PAGETABLE_PMD_FOLDED - pud = pfn_pud(pfn, prot); - set_pud_at(mm, vaddr, pudp, pud); pudp_huge_get_and_clear(mm, vaddr, pudp); pud = READ_ONCE(*pudp); WARN_ON(!pud_none(pud)); - - pud = pfn_pud(pfn, prot); - set_pud_at(mm, vaddr, pudp, pud); - pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1); - pud = READ_ONCE(*pudp); - WARN_ON(!pud_none(pud)); #endif /* __PAGETABLE_PMD_FOLDED */ pud = pfn_pud(pfn, prot); pud = pud_wrprotect(pud); @@ -294,11 +307,20 @@ static void __init pud_advanced_tests(struct mm_struct *mm, pud = READ_ONCE(*pudp); WARN_ON(!(pud_write(pud) && pud_dirty(pud))); +#ifndef __PAGETABLE_PMD_FOLDED + pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1); + pud = READ_ONCE(*pudp); + WARN_ON(!pud_none(pud)); +#endif /* __PAGETABLE_PMD_FOLDED */ + + pud = pfn_pud(pfn, prot); pud = pud_mkyoung(pud); set_pud_at(mm, vaddr, pudp, pud); pudp_test_and_clear_young(vma, vaddr, pudp); pud = READ_ONCE(*pudp); WARN_ON(pud_young(pud)); + + pudp_huge_get_and_clear(mm, vaddr, pudp); } static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) @@ -313,11 +335,12 @@ static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) WARN_ON(!pud_leaf(pud)); } +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { pud_t pud; - if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + if (!arch_ioremap_pud_supported()) return; pr_debug("Validating PUD huge\n"); @@ -331,6 +354,10 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) pud = READ_ONCE(*pudp); WARN_ON(!pud_none(pud)); } +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { } +#endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ + #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { } static void __init pud_advanced_tests(struct mm_struct *mm, @@ -350,7 +377,7 @@ static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { } static void __init pmd_advanced_tests(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmdp, unsigned long pfn, unsigned long vaddr, - pgprot_t prot) + pgprot_t prot, pgtable_t pgtable) { } static void __init pud_advanced_tests(struct mm_struct *mm, @@ -417,8 +444,6 @@ static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp, * This entry points to next level page table page. * Hence this must not qualify as pud_bad(). 
*/ - pmd_clear(pmdp); - pud_clear(pudp); pud_populate(mm, pudp, pmdp); pud = READ_ONCE(*pudp); WARN_ON(pud_bad(pud)); @@ -515,12 +540,15 @@ static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp, #endif /* PAGETABLE_P4D_FOLDED */ static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep, - unsigned long vaddr) + unsigned long pfn, unsigned long vaddr, + pgprot_t prot) { - pte_t pte = ptep_get(ptep); + pte_t pte = pfn_pte(pfn, prot); pr_debug("Validating PTE clear\n"); +#ifndef CONFIG_RISCV pte = __pte(pte_val(pte) | RANDOM_ORVALUE); +#endif set_pte_at(mm, vaddr, ptep, pte); barrier(); pte_clear(mm, vaddr, ptep); @@ -550,7 +578,6 @@ static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp, * This entry points to next level page table page. * Hence this must not qualify as pmd_bad(). */ - pmd_clear(pmdp); pmd_populate(mm, pmdp, pgtable); pmd = READ_ONCE(*pmdp); WARN_ON(pmd_bad(pmd)); @@ -784,57 +811,8 @@ static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) WARN_ON(!pte_huge(pte_mkhuge(pte))); #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ } - -static void __init hugetlb_advanced_tests(struct mm_struct *mm, - struct vm_area_struct *vma, - pte_t *ptep, unsigned long pfn, - unsigned long vaddr, pgprot_t prot) -{ - struct page *page = pfn_to_page(pfn); - pte_t pte = ptep_get(ptep); - unsigned long paddr = __pfn_to_phys(pfn) & PMD_MASK; - - pr_debug("Validating HugeTLB advanced\n"); - pte = pte_mkhuge(mk_pte(pfn_to_page(PHYS_PFN(paddr)), prot)); - set_huge_pte_at(mm, vaddr, ptep, pte); - barrier(); - WARN_ON(!pte_same(pte, huge_ptep_get(ptep))); - huge_pte_clear(mm, vaddr, ptep, PMD_SIZE); - pte = huge_ptep_get(ptep); - WARN_ON(!huge_pte_none(pte)); - - pte = mk_huge_pte(page, prot); - set_huge_pte_at(mm, vaddr, ptep, pte); - barrier(); - huge_ptep_set_wrprotect(mm, vaddr, ptep); - pte = huge_ptep_get(ptep); - WARN_ON(huge_pte_write(pte)); - - pte = mk_huge_pte(page, prot); - set_huge_pte_at(mm, vaddr, ptep, pte); - barrier(); - huge_ptep_get_and_clear(mm, vaddr, ptep); - pte = huge_ptep_get(ptep); - WARN_ON(!huge_pte_none(pte)); - - pte = mk_huge_pte(page, prot); - pte = huge_pte_wrprotect(pte); - set_huge_pte_at(mm, vaddr, ptep, pte); - barrier(); - pte = huge_pte_mkwrite(pte); - pte = huge_pte_mkdirty(pte); - huge_ptep_set_access_flags(vma, vaddr, ptep, pte, 1); - pte = huge_ptep_get(ptep); - WARN_ON(!(huge_pte_write(pte) && huge_pte_dirty(pte))); -} #else /* !CONFIG_HUGETLB_PAGE */ static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { } -static void __init hugetlb_advanced_tests(struct mm_struct *mm, - struct vm_area_struct *vma, - pte_t *ptep, unsigned long pfn, - unsigned long vaddr, pgprot_t prot) -{ -} #endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -965,7 +943,13 @@ static int __init debug_vm_pgtable(void) p4dp = p4d_alloc(mm, pgdp, vaddr); pudp = pud_alloc(mm, p4dp, vaddr); pmdp = pmd_alloc(mm, pudp, vaddr); - ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl); + /* + * Allocate pgtable_t + */ + if (pte_alloc(mm, pmdp)) { + pr_err("pgtable allocation failed\n"); + return 1; + } /* * Save all the page table page addresses as the page table @@ -985,32 +969,11 @@ static int __init debug_vm_pgtable(void) p4d_basic_tests(p4d_aligned, prot); pgd_basic_tests(pgd_aligned, prot); - pte_clear_tests(mm, ptep, vaddr); - pmd_clear_tests(mm, pmdp); - pud_clear_tests(mm, pudp); - p4d_clear_tests(mm, p4dp); - pgd_clear_tests(mm, pgdp); - - pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot); 
- pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot); - pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot); - hugetlb_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot); - pmd_leaf_tests(pmd_aligned, prot); pud_leaf_tests(pud_aligned, prot); - pmd_huge_tests(pmdp, pmd_aligned, prot); - pud_huge_tests(pudp, pud_aligned, prot); - - pte_savedwrite_tests(pte_aligned, prot); - pmd_savedwrite_tests(pmd_aligned, prot); - - pte_unmap_unlock(ptep, ptl); - - pmd_populate_tests(mm, pmdp, saved_ptep); - pud_populate_tests(mm, pudp, saved_pmdp); - p4d_populate_tests(mm, p4dp, saved_pudp); - pgd_populate_tests(mm, pgdp, saved_p4dp); + pte_savedwrite_tests(pte_aligned, protnone); + pmd_savedwrite_tests(pmd_aligned, protnone); pte_special_tests(pte_aligned, prot); pte_protnone_tests(pte_aligned, protnone); @@ -1029,11 +992,43 @@ static int __init debug_vm_pgtable(void) pmd_swap_tests(pmd_aligned, prot); swap_migration_tests(); - hugetlb_basic_tests(pte_aligned, prot); pmd_thp_tests(pmd_aligned, prot); pud_thp_tests(pud_aligned, prot); + hugetlb_basic_tests(pte_aligned, prot); + + /* + * Page table modifying tests. They need to hold + * proper page table lock. + */ + + ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl); + pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot); + pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot); + pte_unmap_unlock(ptep, ptl); + + ptl = pmd_lock(mm, pmdp); + pmd_clear_tests(mm, pmdp); + pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep); + pmd_huge_tests(pmdp, pmd_aligned, prot); + pmd_populate_tests(mm, pmdp, saved_ptep); + spin_unlock(ptl); + + ptl = pud_lock(mm, pudp); + pud_clear_tests(mm, pudp); + pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot); + pud_huge_tests(pudp, pud_aligned, prot); + pud_populate_tests(mm, pudp, saved_pmdp); + spin_unlock(ptl); + + spin_lock(&mm->page_table_lock); + p4d_clear_tests(mm, p4dp); + pgd_clear_tests(mm, pgdp); + p4d_populate_tests(mm, p4dp, saved_pudp); + pgd_populate_tests(mm, pgdp, saved_p4dp); + spin_unlock(&mm->page_table_lock); + p4d_free(mm, saved_p4dp); pud_free(mm, saved_pudp); pmd_free(mm, saved_pmdp); diff --git a/mm/filemap.c b/mm/filemap.c index e3b8987153e6..1a6beaf69f49 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -249,7 +249,7 @@ static void page_cache_free_page(struct address_space *mapping, freepage(page); if (PageTransHuge(page) && !PageHuge(page)) { - page_ref_sub(page, HPAGE_PMD_NR); + page_ref_sub(page, thp_nr_pages(page)); VM_BUG_ON_PAGE(page_count(page) <= 0, page); } else { put_page(page); @@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page); noinline int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t offset, gfp_t gfp_mask, + pgoff_t offset, gfp_t gfp, void **shadowp) { XA_STATE(xas, &mapping->i_pages, offset); int huge = PageHuge(page); int error; - void *old; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageSwapBacked(page), page); @@ -846,25 +845,46 @@ noinline int __add_to_page_cache_locked(struct page *page, page->index = offset; if (!huge) { - error = mem_cgroup_charge(page, current->mm, gfp_mask); + error = mem_cgroup_charge(page, current->mm, gfp); if (error) goto error; } + gfp &= GFP_RECLAIM_MASK; + do { + unsigned int order = xa_get_order(xas.xa, xas.xa_index); + void *entry, *old = NULL; + + if (order > thp_order(page)) + xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), + order, gfp); xas_lock_irq(&xas); - old = xas_load(&xas); - if (old && !xa_is_value(old)) - 
xas_set_err(&xas, -EEXIST); + xas_for_each_conflict(&xas, entry) { + old = entry; + if (!xa_is_value(entry)) { + xas_set_err(&xas, -EEXIST); + goto unlock; + } + } + + if (old) { + if (shadowp) + *shadowp = old; + /* entry may have been split before we acquired lock */ + order = xa_get_order(xas.xa, xas.xa_index); + if (order > thp_order(page)) { + xas_split(&xas, old, order); + xas_reset(&xas); + } + } + xas_store(&xas, page); if (xas_error(&xas)) goto unlock; - if (xa_is_value(old)) { + if (old) mapping->nrexceptional--; - if (shadowp) - *shadowp = old; - } mapping->nrpages++; /* hugetlb pages do not participate in page cache accounting */ @@ -872,7 +892,7 @@ noinline int __add_to_page_cache_locked(struct page *page, __inc_lruvec_page_state(page, NR_FILE_PAGES); unlock: xas_unlock_irq(&xas); - } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK)); + } while (xas_nomem(&xas, gfp)); if (xas_error(&xas)) { error = xas_error(&xas); @@ -1425,7 +1445,7 @@ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem * unlock_page - unlock a locked page * @page: the page * - * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). + * Unlocks the page and wakes up sleepers in wait_on_page_locked(). * Also wakes sleepers in wait_on_page_writeback() because the wakeup * mechanism between PageLocked pages and PageWriteback pages is shared. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. @@ -2568,8 +2588,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) struct file *file = vmf->vma->vm_file; struct file_ra_state *ra = &file->f_ra; struct address_space *mapping = file->f_mapping; + DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff); struct file *fpin = NULL; - pgoff_t offset = vmf->pgoff; unsigned int mmap_miss; /* If we don't want any read-ahead, don't bother */ @@ -2580,8 +2600,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) if (vmf->vma->vm_flags & VM_SEQ_READ) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); - page_cache_sync_readahead(mapping, ra, file, offset, - ra->ra_pages); + page_cache_sync_ra(&ractl, ra, ra->ra_pages); return fpin; } @@ -2601,10 +2620,11 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) * mmap read-around */ fpin = maybe_unlock_mmap_for_io(vmf, fpin); - ra->start = max_t(long, 0, offset - ra->ra_pages / 2); + ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); ra->size = ra->ra_pages; ra->async_size = ra->ra_pages / 4; - ra_submit(ra, mapping, file); + ractl._index = ra->start; + do_page_cache_ra(&ractl, ra->size, ra->async_size); return fpin; } @@ -2984,7 +3004,7 @@ filler: goto out; /* - * Page is not up to date and may be locked due one of the following + * Page is not up to date and may be locked due to one of the following * case a: Page is being filled and the page lock is held * case b: Read/write error clearing the page uptodate status * case c: Truncation in progress (page locked) @@ -1490,35 +1490,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) mmap_read_unlock(mm); return ret; /* 0 or negative error code */ } - -/** - * get_dump_page() - pin user page in memory while writing it to core dump - * @addr: user address - * - * Returns struct page pointer of user page pinned for dump, - * to be freed afterwards by put_page(). 
- * - * Returns NULL on any kind of failure - a hole must then be inserted into - * the corefile, to preserve alignment with its headers; and also returns - * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - - * allowing a hole to be left in the corefile to save diskspace. - * - * Called without mmap_lock, but after all other threads have been killed. - */ -#ifdef CONFIG_ELF_CORE -struct page *get_dump_page(unsigned long addr) -{ - struct vm_area_struct *vma; - struct page *page; - - if (__get_user_pages(current->mm, addr, 1, - FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, - NULL) < 1) - return NULL; - flush_cache_page(vma, addr, page_to_pfn(page)); - return page; -} -#endif /* CONFIG_ELF_CORE */ #else /* CONFIG_MMU */ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1564,6 +1535,38 @@ finish_or_fault: } #endif /* !CONFIG_MMU */ +/** + * get_dump_page() - pin user page in memory while writing it to core dump + * @addr: user address + * + * Returns struct page pointer of user page pinned for dump, + * to be freed afterwards by put_page(). + * + * Returns NULL on any kind of failure - a hole must then be inserted into + * the corefile, to preserve alignment with its headers; and also returns + * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - + * allowing a hole to be left in the corefile to save diskspace. + * + * Called without mmap_lock (takes and releases the mmap_lock by itself). + */ +#ifdef CONFIG_ELF_CORE +struct page *get_dump_page(unsigned long addr) +{ + struct mm_struct *mm = current->mm; + struct page *page; + int locked = 1; + int ret; + + if (mmap_read_lock_killable(mm)) + return NULL; + ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked, + FOLL_FORCE | FOLL_DUMP | FOLL_GET); + if (locked) + mmap_read_unlock(mm); + return (ret == 1) ? page : NULL; +} +#endif /* CONFIG_ELF_CORE */ + #if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA) static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) { diff --git a/mm/highmem.c b/mm/highmem.c index 64d8dea47dd1..1352a27951e3 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -369,7 +369,7 @@ void kunmap_high(struct page *page) } EXPORT_SYMBOL(kunmap_high); -#endif +#endif /* CONFIG_HIGHMEM */ #if defined(HASHED_PAGE_VIRTUAL) @@ -481,4 +481,4 @@ void __init page_address_init(void) } } -#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ +#endif /* defined(HASHED_PAGE_VIRTUAL) */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 65c289c13b58..9474dbc150ed 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2335,13 +2335,13 @@ static void unmap_page(struct page *page) VM_BUG_ON_PAGE(!unmap_success, page); } -static void remap_page(struct page *page) +static void remap_page(struct page *page, unsigned int nr |
