Diffstat (limited to 'mm')
69 files changed, 2933 insertions, 1647 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 02d44e3420f5..ded98fb859ab 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -19,7 +19,7 @@ choice
 
 config FLATMEM_MANUAL
 	bool "Flat Memory"
-	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
+	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
 	help
 	  This option is best suited for non-NUMA systems with
 	  flat address space. The FLATMEM is the most efficient
@@ -32,21 +32,6 @@ config FLATMEM_MANUAL
 
 	  If unsure, choose this option (Flat Memory) over any other.
 
-config DISCONTIGMEM_MANUAL
-	bool "Discontiguous Memory"
-	depends on ARCH_DISCONTIGMEM_ENABLE
-	help
-	  This option provides enhanced support for discontiguous
-	  memory systems, over FLATMEM. These systems have holes
-	  in their physical address spaces, and this option provides
-	  more efficient handling of these holes.
-
-	  Although "Discontiguous Memory" is still used by several
-	  architectures, it is considered deprecated in favor of
-	  "Sparse Memory".
-
-	  If unsure, choose "Sparse Memory" over this option.
-
 config SPARSEMEM_MANUAL
 	bool "Sparse Memory"
 	depends on ARCH_SPARSEMEM_ENABLE
@@ -62,30 +47,13 @@ config SPARSEMEM_MANUAL
 
 endchoice
 
-config DISCONTIGMEM
-	def_bool y
-	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
-
 config SPARSEMEM
 	def_bool y
 	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
 
 config FLATMEM
 	def_bool y
-	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
-
-config FLAT_NODE_MEM_MAP
-	def_bool y
-	depends on !SPARSEMEM
-
-#
-# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
-# to represent different areas of memory. This variable allows
-# those dependencies to exist individually.
-#
-config NEED_MULTIPLE_NODES
-	def_bool y
-	depends on DISCONTIGMEM || NUMA
+	depends on !SPARSEMEM || FLATMEM_MANUAL
 
 #
 # SPARSEMEM_EXTREME (which is the default) does some bootmem
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 576220acd686..271f2ca862c8 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -371,12 +371,16 @@ static void wb_exit(struct bdi_writeback *wb)
 #include <linux/memcontrol.h>
 
 /*
- * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
- * bdi->cgwb_tree is also RCU protected.
+ * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
+ * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected.
  */
 static DEFINE_SPINLOCK(cgwb_lock);
 static struct workqueue_struct *cgwb_release_wq;
 
+static LIST_HEAD(offline_cgwbs);
+static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
+static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
+
 static void cgwb_release_workfn(struct work_struct *work)
 {
 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -395,7 +399,13 @@ static void cgwb_release_workfn(struct work_struct *work)
 	fprop_local_destroy_percpu(&wb->memcg_completions);
 	percpu_ref_exit(&wb->refcnt);
+
+	spin_lock_irq(&cgwb_lock);
+	list_del(&wb->offline_node);
+	spin_unlock_irq(&cgwb_lock);
+
 	wb_exit(wb);
+	WARN_ON_ONCE(!list_empty(&wb->b_attached));
 	kfree_rcu(wb, rcu);
 }
 
@@ -413,6 +423,7 @@ static void cgwb_kill(struct bdi_writeback *wb)
 	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 	list_del(&wb->memcg_node);
 	list_del(&wb->blkcg_node);
+	list_add(&wb->offline_node, &offline_cgwbs);
 	percpu_ref_kill(&wb->refcnt);
 }
 
@@ -472,6 +483,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
 	wb->memcg_css = memcg_css;
 	wb->blkcg_css = blkcg_css;
+	INIT_LIST_HEAD(&wb->b_attached);
 	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 	set_bit(WB_registered, &wb->state);
 
@@ -633,6 +645,54 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 	mutex_unlock(&bdi->cgwb_release_mutex);
 }
 
+/*
+ * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
+ *
+ * Try to release dying cgwbs by switching attached inodes to the nearest
+ * living ancestor's writeback. Processed wbs are placed at the end
+ * of the list to guarantee the forward progress.
+ */
+static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
+{
+	struct bdi_writeback *wb;
+	LIST_HEAD(processed);
+
+	spin_lock_irq(&cgwb_lock);
+
+	while (!list_empty(&offline_cgwbs)) {
+		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
+				      offline_node);
+		list_move(&wb->offline_node, &processed);
+
+		/*
+		 * If wb is dirty, cleaning up the writeback by switching
+		 * attached inodes will result in an effective removal of any
+		 * bandwidth restrictions, which isn't the goal. Instead,
+		 * it can be postponed until the next time, when all io
+		 * will be likely completed. If in the meantime some inodes
+		 * will get re-dirtied, they should be eventually switched to
+		 * a new cgwb.
+		 */
+		if (wb_has_dirty_io(wb))
+			continue;
+
+		if (!wb_tryget(wb))
+			continue;
+
+		spin_unlock_irq(&cgwb_lock);
+		while (cleanup_offline_cgwb(wb))
+			cond_resched();
+		spin_lock_irq(&cgwb_lock);
+
+		wb_put(wb);
+	}
+
+	if (!list_empty(&processed))
+		list_splice_tail(&processed, &offline_cgwbs);
+
+	spin_unlock_irq(&cgwb_lock);
+}
+
 /**
  * wb_memcg_offline - kill all wb's associated with a memcg being offlined
  * @memcg: memcg being offlined
@@ -649,6 +709,8 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
 		cgwb_kill(wb);
 	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 	spin_unlock_irq(&cgwb_lock);
+
+	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
 }
 
 /**
diff --git a/mm/compaction.c b/mm/compaction.c
index 84fde270ae74..3a509fbf2bea 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1028,7 +1028,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!TestClearPageLRU(page))
 			goto isolate_fail_put;
 
-		lruvec = mem_cgroup_page_lruvec(page, pgdat);
+		lruvec = mem_cgroup_page_lruvec(page);
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (lruvec != locked) {
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory(int order)
 
 static bool kswapd_is_running(pg_data_t *pgdat)
 {
-	return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+	return pgdat->kswapd && task_is_running(pgdat->kswapd);
 }
 
 /*
diff --git a/mm/debug.c b/mm/debug.c
index 0bdda8407f71..e73fe0a8ec3d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,11 +42,10 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-void __dump_page(struct page *page, const char *reason)
+static void __dump_page(struct page *page)
 {
 	struct page *head = compound_head(page);
 	struct address_space *mapping;
-	bool page_poisoned = PagePoisoned(page);
 	bool compound = PageCompound(page);
 	/*
 	 * Accessing the pageblock without the zone lock. It could change to
@@ -58,16 +57,6 @@ void __dump_page(struct page *page, const char *reason)
 	int mapcount;
 	char *type = "";
 
-	/*
-	 * If struct page is poisoned don't access Page*() functions as that
-	 * leads to recursive loop. Page*() check for poisoned pages, and calls
-	 * dump_page() when detected.
-	 */
-	if (page_poisoned) {
-		pr_warn("page:%px is uninitialized and poisoned", page);
-		goto hex_only;
-	}
-
 	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
 		/*
 		 * Corrupt page, so we cannot call page_mapping. Instead, do a
@@ -173,8 +162,6 @@ out_mapping:
 	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
 		page_cma ? " CMA" : "");
-
-hex_only:
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
 			sizeof(struct page), false);
@@ -182,14 +169,16 @@ hex_only:
 		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
 				sizeof(unsigned long), head,
 				sizeof(struct page), false);
-
-	if (reason)
-		pr_warn("page dumped because: %s\n", reason);
 }
 
 void dump_page(struct page *page, const char *reason)
 {
-	__dump_page(page, reason);
+	if (PagePoisoned(page))
+		pr_warn("page:%p is uninitialized and poisoned", page);
+	else
+		__dump_page(page);
+	if (reason)
+		pr_warn("page dumped because: %s\n", reason);
 	dump_page_owner(page);
 }
 EXPORT_SYMBOL(dump_page);
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 05efe98a9ac2..92bfc37300df 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -146,13 +146,14 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
 static void __init pmd_basic_tests(unsigned long pfn, int idx)
 {
 	pgprot_t prot = protection_map[idx];
-	pmd_t pmd = pfn_pmd(pfn, prot);
 	unsigned long val = idx, *ptr = &val;
+	pmd_t pmd;
 
 	if (!has_transparent_hugepage())
 		return;
 
 	pr_debug("Validating PMD basic (%pGv)\n", ptr);
+	pmd = pfn_pmd(pfn, prot);
 
 	/*
 	 * This test needs to be executed after the given page table entry
@@ -185,14 +186,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 				      unsigned long pfn, unsigned long vaddr,
 				      pgprot_t prot, pgtable_t pgtable)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
 
 	if (!has_transparent_hugepage())
 		return;
 
 	pr_debug("Validating PMD advanced\n");
 	/* Align the address wrt HPAGE_PMD_SIZE */
-	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+	vaddr &= HPAGE_PMD_MASK;
 
 	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -232,9 +233,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
 static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
+
+	if (!has_transparent_hugepage())
+		return;
 
 	pr_debug("Validating PMD leaf\n");
+	pmd = pfn_pmd(pfn, prot);
+
 	/*
 	 * PMD based THP is a leaf entry.
 	 */
@@ -267,12 +273,16 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
 
 static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
 
 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
 		return;
 
+	if (!has_transparent_hugepage())
+		return;
+
 	pr_debug("Validating PMD saved write\n");
+	pmd = pfn_pmd(pfn, prot);
 	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
 	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
 }
@@ -281,13 +291,14 @@ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
 static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
 {
 	pgprot_t prot = protection_map[idx];
-	pud_t pud = pfn_pud(pfn, prot);
 	unsigned long val = idx, *ptr = &val;
+	pud_t pud;
 
 	if (!has_transparent_hugepage())
 		return;
 
 	pr_debug("Validating PUD basic (%pGv)\n", ptr);
+	pud = pfn_pud(pfn, prot);
 
 	/*
 	 * This test needs to be executed after the given page table entry
@@ -323,15 +334,16 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 				      unsigned long pfn, unsigned long vaddr,
 				      pgprot_t prot)
 {
-	pud_t pud = pfn_pud(pfn, prot);
+	pud_t pud;
 
 	if (!has_transparent_hugepage())
 		return;
 
 	pr_debug("Validating PUD advanced\n");
 	/* Align the address wrt HPAGE_PUD_SIZE */
-	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+	vaddr &= HPAGE_PUD_MASK;
 
+	pud = pfn_pud(pfn, prot);
 	set_pud_at(mm, vaddr, pudp, pud);
 	pudp_set_wrprotect(mm, vaddr, pudp);
 	pud = READ_ONCE(*pudp);
@@ -370,9 +382,13 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
 static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
 {
-	pud_t pud = pfn_pud(pfn, prot);
+	pud_t pud;
+
+	if (!has_transparent_hugepage())
+		return;
 
 	pr_debug("Validating PUD leaf\n");
+	pud = pfn_pud(pfn, prot);
 	/*
 	 * PUD based THP is a leaf entry.
 	 */
@@ -654,12 +670,16 @@ static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
+	pmd_t pmd;
 
 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
 		return;
 
+	if (!has_transparent_hugepage())
+		return;
+
 	pr_debug("Validating PMD protnone\n");
+	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
 	WARN_ON(!pmd_protnone(pmd));
 	WARN_ON(!pmd_present(pmd));
 }
@@ -679,18 +699,26 @@ static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
+
+	if (!has_transparent_hugepage())
+		return;
 
 	pr_debug("Validating PMD devmap\n");
+	pmd = pfn_pmd(pfn, prot);
 	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
 {
-	pud_t pud = pfn_pud(pfn, prot);
+	pud_t pud;
+
+	if (!has_transparent_hugepage())
+		return;
 
 	pr_debug("Validating PUD devmap\n");
+	pud = pfn_pud(pfn, prot);
 	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
 }
 #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
@@ -733,25 +761,33 @@ static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
 
 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
 		return;
 
+	if (!has_transparent_hugepage())
+		return;
+
 	pr_debug("Validating PMD soft dirty\n");
+	pmd = pfn_pmd(pfn, prot);
 	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
 	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
 }
 
 static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
 {
-	pmd_t pmd = pfn_pmd(pfn, prot);
+	pmd_t pmd;
 
 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
 	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
 		return;
 
+	if (!has_transparent_hugepage())
+		return;
+
 	pr_debug("Validating PMD swap soft dirty\n");
+	pmd = pfn_pmd(pfn, prot);
 	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
 	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
 }
@@ -780,6 +816,9 @@ static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
 	swp_entry_t swp;
 	pmd_t pmd;
 
+	if (!has_transparent_hugepage())
+		return;
+
 	pr_debug("Validating PMD swap\n");
 	pmd = pfn_pmd(pfn, prot);
 	swp = __pmd_to_swp_entry(pmd);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 16483f86360e..64b537b3ccb0 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -62,8 +62,7 @@ struct dma_page {	/* cacheable header for 'allocation' bytes */
 static DEFINE_MUTEX(pools_lock);
 static DEFINE_MUTEX(pools_reg_lock);
 
-static ssize_t
-show_pools(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	unsigned temp;
 	unsigned size;
@@ -103,7 +102,7 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 	return PAGE_SIZE - size;
 }
 
-static DEVICE_ATTR(pools, 0444, show_pools, NULL);
+static DEVICE_ATTR_RO(pools);
 
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
diff --git a/mm/filemap.c b/mm/filemap.c
index 66f7e9fdfbc4..ac82a93d4f38 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -872,7 +872,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	page->index = offset;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page, current->mm, gfp);
+		error = mem_cgroup_charge(page, NULL, gfp);
 		if (error)
 			goto error;
 		charged = true;
@@ -44,6 +44,23 @@ static void hpage_pincount_sub(struct page *page, int refs)
 	atomic_sub(refs, compound_pincount_ptr(page));
 }
 
+/* Equivalent to calling put_page() @refs times. */
+static void put_page_refs(struct page *page, int refs)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
+		return;
+#endif
+
+	/*
+	 * Calling put_pag