Diffstat (limited to 'mm')
-rw-r--r--  mm/balloon_compaction.c |  10
-rw-r--r--  mm/compaction.c         |  34
-rw-r--r--  mm/filemap.c            | 143
-rw-r--r--  mm/folio-compat.c       |  22
-rw-r--r--  mm/gup.c                |   6
-rw-r--r--  mm/hmm.c                |  19
-rw-r--r--  mm/huge_memory.c        |  11
-rw-r--r--  mm/hugetlb.c            |  24
-rw-r--r--  mm/internal.h           |   2
-rw-r--r--  mm/ioremap.c            |  26
-rw-r--r--  mm/kasan/common.c       |   3
-rw-r--r--  mm/kfence/core.c        |  18
-rw-r--r--  mm/ksm.c                |   2
-rw-r--r--  mm/memblock.c           |   7
-rw-r--r--  mm/memory-failure.c     |   2
-rw-r--r--  mm/memory.c             |   9
-rw-r--r--  mm/memremap.c           |   6
-rw-r--r--  mm/migrate.c            | 238
-rw-r--r--  mm/migrate_device.c     |   3
-rw-r--r--  mm/page-writeback.c     |  89
-rw-r--r--  mm/page_alloc.c         |  33
-rw-r--r--  mm/secretmem.c          |  55
-rw-r--r--  mm/shmem.c              |  26
-rw-r--r--  mm/slab.c               |  20
-rw-r--r--  mm/slab.h               |  39
-rw-r--r--  mm/slab_common.c        |  36
-rw-r--r--  mm/slob.c               |  33
-rw-r--r--  mm/slub.c               |  98
-rw-r--r--  mm/sparse-vmemmap.c     |   2
-rw-r--r--  mm/swap.c               |  29
-rw-r--r--  mm/swap_slots.c         |   2
-rw-r--r--  mm/swap_state.c         |   4
-rw-r--r--  mm/truncate.c           |   2
-rw-r--r--  mm/usercopy.c           |   2
-rw-r--r--  mm/util.c               |   4
-rw-r--r--  mm/vmscan.c             |  56
-rw-r--r--  mm/z3fold.c             |  84
-rw-r--r--  mm/zsmalloc.c           | 102
38 files changed, 584 insertions(+), 717 deletions(-)
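Much of the churn in this series converts non-LRU page migration from address_space_operations callbacks to the new struct movable_operations (see the mm/balloon_compaction.c and mm/compaction.c hunks below, which introduce balloon_mops and change __SetPageMovable() to take a mops pointer). As a rough driver-side sketch of what that means — the my_dev_* names are purely illustrative, and the isolate callback's exact signature is assumed from this series' include/linux/migrate.h rather than shown in this diff — registration after the change looks roughly like:

/*
 * Hypothetical driver sketch, not taken from this diff: a driver that owns
 * non-LRU movable pages now fills in a struct movable_operations and ties
 * it to each page with __SetPageMovable() while holding the page lock
 * (the VM_BUG_ON_PAGE(!PageLocked(page)) in compaction.c below enforces this).
 */
#include <linux/migrate.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

static bool my_dev_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Take the page off the driver's internal list so compaction may move it. */
	return true;
}

static int my_dev_migrate_page(struct page *dst, struct page *src,
			       enum migrate_mode mode)
{
	/* Copy driver state from @src to @dst; return 0 on success. */
	return 0;
}

static void my_dev_putback_page(struct page *page)
{
	/* Migration failed or was aborted: put the page back on the driver's list. */
}

static const struct movable_operations my_dev_mops = {
	.isolate_page	= my_dev_isolate_page,
	.migrate_page	= my_dev_migrate_page,
	.putback_page	= my_dev_putback_page,
};

static void my_dev_mark_movable(struct page *page)
{
	lock_page(page);
	/* Previously this took the page's address_space; now it takes the mops. */
	__SetPageMovable(page, &my_dev_mops);
	unlock_page(page);
}

The balloon driver conversion in the first hunk below is the in-tree instance of exactly this pattern.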
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 4b8eab4b3f45..22c96fed70b5 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -228,10 +228,8 @@ static void balloon_page_putback(struct page *page) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } - /* move_to_new_page() counterpart for a ballooned page */ -static int balloon_page_migrate(struct address_space *mapping, - struct page *newpage, struct page *page, +static int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { struct balloon_dev_info *balloon = balloon_page_device(page); @@ -250,11 +248,11 @@ static int balloon_page_migrate(struct address_space *mapping, return balloon->migratepage(balloon, newpage, page, mode); } -const struct address_space_operations balloon_aops = { - .migratepage = balloon_page_migrate, +const struct movable_operations balloon_mops = { + .migrate_page = balloon_page_migrate, .isolate_page = balloon_page_isolate, .putback_page = balloon_page_putback, }; -EXPORT_SYMBOL_GPL(balloon_aops); +EXPORT_SYMBOL_GPL(balloon_mops); #endif /* CONFIG_BALLOON_COMPACTION */ diff --git a/mm/compaction.c b/mm/compaction.c index 1f89b969c12b..a2c53fcf933e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -110,28 +110,27 @@ static void split_map_pages(struct list_head *list) } #ifdef CONFIG_COMPACTION - -int PageMovable(struct page *page) +bool PageMovable(struct page *page) { - struct address_space *mapping; + const struct movable_operations *mops; VM_BUG_ON_PAGE(!PageLocked(page), page); if (!__PageMovable(page)) - return 0; + return false; - mapping = page_mapping(page); - if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) - return 1; + mops = page_movable_ops(page); + if (mops) + return true; - return 0; + return false; } EXPORT_SYMBOL(PageMovable); -void __SetPageMovable(struct page *page, struct address_space *mapping) +void __SetPageMovable(struct page *page, const struct movable_operations *mops) { VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page); - page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); + VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page); + page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); } EXPORT_SYMBOL(__SetPageMovable); @@ -139,12 +138,10 @@ void __ClearPageMovable(struct page *page) { VM_BUG_ON_PAGE(!PageMovable(page), page); /* - * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE - * flag so that VM can catch up released page by driver after isolation. - * With it, VM migration doesn't try to put it back. + * This page still has the type of a movable page, but it's + * actually not movable any more. */ - page->mapping = (void *)((unsigned long)page->mapping & - PAGE_MAPPING_MOVABLE); + page->mapping = (void *)PAGE_MAPPING_MOVABLE; } EXPORT_SYMBOL(__ClearPageMovable); @@ -1034,7 +1031,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* * Only pages without mappings or that have a - * ->migratepage callback are possible to migrate + * ->migrate_folio callback are possible to migrate * without blocking. 
However, we can be racing with * truncation so it's necessary to lock the page * to stabilise the mapping as truncation holds @@ -1045,7 +1042,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, goto isolate_fail_put; mapping = page_mapping(page); - migrate_dirty = !mapping || mapping->a_ops->migratepage; + migrate_dirty = !mapping || + mapping->a_ops->migrate_folio; unlock_page(page); if (!migrate_dirty) goto isolate_fail_put; diff --git a/mm/filemap.c b/mm/filemap.c index ffdfbc8b0e3c..0dec96ea6688 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -929,26 +929,6 @@ error: } ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO); -/** - * add_to_page_cache_locked - add a locked page to the pagecache - * @page: page to add - * @mapping: the page's address_space - * @offset: page index - * @gfp_mask: page allocation mode - * - * This function is used to add a page to the pagecache. It must be locked. - * This function does not add the page to the LRU. The caller must do that. - * - * Return: %0 on success, negative error code otherwise. - */ -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t offset, gfp_t gfp_mask) -{ - return __filemap_add_folio(mapping, page_folio(page), offset, - gfp_mask, NULL); -} -EXPORT_SYMBOL(add_to_page_cache_locked); - int filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp) { @@ -1988,6 +1968,10 @@ no_page: gfp |= __GFP_WRITE; if (fgp_flags & FGP_NOFS) gfp &= ~__GFP_FS; + if (fgp_flags & FGP_NOWAIT) { + gfp &= ~GFP_KERNEL; + gfp |= GFP_NOWAIT | __GFP_NOWARN; + } folio = filemap_alloc_folio(gfp, 0); if (!folio) @@ -2147,65 +2131,46 @@ put: return folio_batch_count(fbatch); } -static inline -bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max) -{ - if (!folio_test_large(folio) || folio_test_hugetlb(folio)) - return false; - if (index >= max) - return false; - return index < folio->index + folio_nr_pages(folio) - 1; -} - /** - * find_get_pages_range - gang pagecache lookup + * filemap_get_folios - Get a batch of folios * @mapping: The address_space to search * @start: The starting page index * @end: The final page index (inclusive) - * @nr_pages: The maximum number of pages - * @pages: Where the resulting pages are placed + * @fbatch: The batch to fill. * - * find_get_pages_range() will search for and return a group of up to @nr_pages - * pages in the mapping starting at index @start and up to index @end - * (inclusive). The pages are placed at @pages. find_get_pages_range() takes - * a reference against the returned pages. + * Search for and return a batch of folios in the mapping starting at + * index @start and up to index @end (inclusive). The folios are returned + * in @fbatch with an elevated reference count. * - * The search returns a group of mapping-contiguous pages with ascending - * indexes. There may be holes in the indices due to not-present pages. - * We also update @start to index the next page for the traversal. + * The first folio may start before @start; if it does, it will contain + * @start. The final folio may extend beyond @end; if it does, it will + * contain @end. The folios have ascending indices. There may be gaps + * between the folios if there are indices which have no folio in the + * page cache. If folios are added to or removed from the page cache + * while this is running, they may or may not be found by this call. * - * Return: the number of pages which were found. 
If this number is - * smaller than @nr_pages, the end of specified range has been - * reached. + * Return: The number of folios which were found. + * We also update @start to index the next folio for the traversal. */ -unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, - pgoff_t end, unsigned int nr_pages, - struct page **pages) +unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, + pgoff_t end, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, *start); struct folio *folio; - unsigned ret = 0; - - if (unlikely(!nr_pages)) - return 0; rcu_read_lock(); - while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { + while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { /* Skip over shadow, swap and DAX entries */ if (xa_is_value(folio)) continue; + if (!folio_batch_add(fbatch, folio)) { + unsigned long nr = folio_nr_pages(folio); -again: - pages[ret] = folio_file_page(folio, xas.xa_index); - if (++ret == nr_pages) { - *start = xas.xa_index + 1; + if (folio_test_hugetlb(folio)) + nr = 1; + *start = folio->index + nr; goto out; } - if (folio_more_pages(folio, xas.xa_index, end)) { - xas.xa_index++; - folio_ref_inc(folio); - goto again; - } } /* @@ -2221,7 +2186,18 @@ again: out: rcu_read_unlock(); - return ret; + return folio_batch_count(fbatch); +} +EXPORT_SYMBOL(filemap_get_folios); + +static inline +bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max) +{ + if (!folio_test_large(folio) || folio_test_hugetlb(folio)) + return false; + if (index >= max) + return false; + return index < folio->index + folio_nr_pages(folio) - 1; } /** @@ -2409,7 +2385,7 @@ retry: rcu_read_unlock(); } -static int filemap_read_folio(struct file *file, struct address_space *mapping, +static int filemap_read_folio(struct file *file, filler_t filler, struct folio *folio) { int error; @@ -2421,7 +2397,7 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping, */ folio_clear_error(folio); /* Start the actual read. The read will unlock the page. */ - error = mapping->a_ops->read_folio(file, folio); + error = filler(file, folio); if (error) return error; @@ -2430,7 +2406,8 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping, return error; if (folio_test_uptodate(folio)) return 0; - shrink_readahead_size_eio(&file->f_ra); + if (file) + shrink_readahead_size_eio(&file->f_ra); return -EIO; } @@ -2503,7 +2480,8 @@ static int filemap_update_page(struct kiocb *iocb, if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) goto unlock; - error = filemap_read_folio(iocb->ki_filp, mapping, folio); + error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, + folio); goto unlock_mapping; unlock: folio_unlock(folio); @@ -2546,7 +2524,7 @@ static int filemap_create_folio(struct file *file, if (error) goto error; - error = filemap_read_folio(file, mapping, folio); + error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); if (error) goto error; @@ -3230,7 +3208,7 @@ page_not_uptodate: * and we need to check for errors. 
*/ fpin = maybe_unlock_mmap_for_io(vmf, fpin); - error = filemap_read_folio(file, mapping, folio); + error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); if (fpin) goto out_retry; folio_put(folio); @@ -3520,20 +3498,7 @@ repeat: return ERR_PTR(err); } -filler: - err = filler(file, folio); - if (err < 0) { - folio_put(folio); - return ERR_PTR(err); - } - - folio_wait_locked(folio); - if (!folio_test_uptodate(folio)) { - folio_put(folio); - return ERR_PTR(-EIO); - } - - goto out; + goto filler; } if (folio_test_uptodate(folio)) goto out; @@ -3556,14 +3521,14 @@ filler: goto out; } - /* - * A previous I/O error may have been due to temporary - * failures. - * Clear page error before actual read, PG_error will be - * set again if read page fails. - */ - folio_clear_error(folio); - goto filler; +filler: + err = filemap_read_folio(file, filler, folio); + if (err) { + folio_put(folio); + if (err == AOP_TRUNCATED_PAGE) + goto repeat; + return ERR_PTR(err); + } out: folio_mark_accessed(folio); diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 20bc15b57d93..458618c7302c 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -51,28 +51,6 @@ void mark_page_accessed(struct page *page) } EXPORT_SYMBOL(mark_page_accessed); -#ifdef CONFIG_MIGRATION -int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, int extra_count) -{ - return folio_migrate_mapping(mapping, page_folio(newpage), - page_folio(page), extra_count); -} -EXPORT_SYMBOL(migrate_page_move_mapping); - -void migrate_page_states(struct page *newpage, struct page *page) -{ - folio_migrate_flags(page_folio(newpage), page_folio(page)); -} -EXPORT_SYMBOL(migrate_page_states); - -void migrate_page_copy(struct page *newpage, struct page *page) -{ - folio_migrate_copy(page_folio(newpage), page_folio(page)); -} -EXPORT_SYMBOL(migrate_page_copy); -#endif - bool set_page_writeback(struct page *page) { return folio_start_writeback(page_folio(page)); @@ -87,7 +87,8 @@ retry: * belongs to this folio. */ if (unlikely(page_folio(page) != folio)) { - folio_put_refs(folio, refs); + if (!put_devmap_managed_page_refs(&folio->page, refs)) + folio_put_refs(folio, refs); goto retry; } @@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) refs *= GUP_PIN_COUNTING_BIAS; } - folio_put_refs(folio, refs); + if (!put_devmap_managed_page_refs(&folio->page, refs)) + folio_put_refs(folio, refs); } /** @@ -212,14 +212,6 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, unsigned long end, unsigned long hmm_pfns[], pmd_t pmd); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline bool hmm_is_device_private_entry(struct hmm_range *range, - swp_entry_t entry) -{ - return is_device_private_entry(entry) && - pfn_swap_entry_to_page(entry)->pgmap->owner == - range->dev_private_owner; -} - static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) { @@ -252,10 +244,12 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, swp_entry_t entry = pte_to_swp_entry(pte); /* - * Never fault in device private pages, but just report - * the PFN even if not present. + * Don't fault in device private pages owned by the caller, + * just report the PFN. 
*/ - if (hmm_is_device_private_entry(range, entry)) { + if (is_device_private_entry(entry) && + pfn_swap_entry_to_page(entry)->pgmap->owner == + range->dev_private_owner) { cpu_flags = HMM_PFN_VALID; if (is_writable_device_private_entry(entry)) cpu_flags |= HMM_PFN_WRITE; @@ -273,6 +267,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, if (!non_swap_entry(entry)) goto fault; + if (is_device_private_entry(entry)) + goto fault; + if (is_device_exclusive_entry(entry)) goto fault; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 834f288b3769..15965084816d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -18,6 +18,7 @@ #include <linux/shrinker.h> #include <linux/mm_inline.h> #include <linux/swapops.h> +#include <linux/backing-dev.h> #include <linux/dax.h> #include <linux/khugepaged.h> #include <linux/freezer.h> @@ -2440,11 +2441,15 @@ static void __split_huge_page(struct page *page, struct list_head *list, __split_huge_page_tail(head, i, lruvec, list); /* Some pages can be beyond EOF: drop them from page cache */ if (head[i].index >= end) { - ClearPageDirty(head + i); - __delete_from_page_cache(head + i, NULL); + struct folio *tail = page_folio(head + i); + if (shmem_mapping(head->mapping)) shmem_uncharge(head->mapping->host, 1); - put_page(head + i); + else if (folio_test_clear_dirty(tail)) + folio_account_cleaned(tail, + inode_to_wb(folio->mapping->host)); + __filemap_remove_folio(tail, NULL); + folio_put(tail); } else if (!PageAnon(page)) { __xa_store(&head->mapping->i_pages, head[i].index, head + i, 0); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a57e1be41401..aa39534898e0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4788,8 +4788,13 @@ again: * sharing with another vma. */ ; - } else if (unlikely(is_hugetlb_entry_migration(entry) || - is_hugetlb_entry_hwpoisoned(entry))) { + } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { + bool uffd_wp = huge_pte_uffd_wp(entry); + + if (!userfaultfd_wp(dst_vma) && uffd_wp) + entry = huge_pte_clear_uffd_wp(entry); + set_huge_pte_at(dst, addr, dst_pte, entry); + } else if (unlikely(is_hugetlb_entry_migration(entry))) { swp_entry_t swp_entry = pte_to_swp_entry(entry); bool uffd_wp = huge_pte_uffd_wp(entry); @@ -5414,19 +5419,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx) { + struct folio *folio = page_folio(page); struct inode *inode = mapping->host; struct hstate *h = hstate_inode(inode); - int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + int err; - if (err) + __folio_set_locked(folio); + err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); + + if (unlikely(err)) { + __folio_clear_locked(folio); return err; + } ClearHPageRestoreReserve(page); /* - * set page dirty so that it will not be removed from cache/file + * mark folio dirty so that it will not be removed from cache/file * by non-hugetlbfs specific code paths. 
*/ - set_page_dirty(page); + folio_mark_dirty(folio); spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); @@ -5947,6 +5958,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, page = alloc_huge_page(dst_vma, dst_addr, 0); if (IS_ERR(page)) { + put_page(*pagep); ret = -ENOMEM; *pagep = NULL; goto out; diff --git a/mm/internal.h b/mm/internal.h index c0f8fbe0445b..ddd2d6a46f1b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -861,4 +861,6 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags); DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); +extern bool mirrored_kernelcore; + #endif /* __MM_INTERNAL_H */ diff --git a/mm/ioremap.c b/mm/ioremap.c index 5fe598ecd9b7..8652426282cc 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -11,29 +11,35 @@ #include <linux/io.h> #include <linux/export.h> -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot) +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + unsigned long prot) { unsigned long offset, vaddr; phys_addr_t last_addr; struct vm_struct *area; /* Disallow wrap-around or zero size */ - last_addr = addr + size - 1; - if (!size || last_addr < addr) + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) return NULL; /* Page-align mappings */ - offset = addr & (~PAGE_MASK); - addr -= offset; + offset = phys_addr & (~PAGE_MASK); + phys_addr -= offset; size = PAGE_ALIGN(size + offset); + if (!ioremap_allowed(phys_addr, size, prot)) + return NULL; + area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0)); if (!area) return NULL; vaddr = (unsigned long)area->addr; + area->phys_addr = phys_addr; - if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) { + if (ioremap_page_range(vaddr, vaddr + size, phys_addr, + __pgprot(prot))) { free_vm_area(area); return NULL; } @@ -44,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot); void iounmap(volatile void __iomem *addr) { - vunmap((void *)((unsigned long)addr & PAGE_MASK)); + void *vaddr = (void *)((unsigned long)addr & PAGE_MASK); + + if (!iounmap_allowed(vaddr)) + return; + + if (is_vmalloc_addr(vaddr)) + vunmap(vaddr); } EXPORT_SYMBOL(iounmap); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index c40c0e7b3b5f..78be2beb7453 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -108,9 +108,10 @@ void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init) return; tag = kasan_random_tag(); + kasan_unpoison(set_tag(page_address(page), tag), + PAGE_SIZE << order, init); for (i = 0; i < (1 << order); i++) page_kasan_tag_set(page + i, tag); - kasan_unpoison(page_address(page), PAGE_SIZE << order, init); } void __kasan_poison_pages(struct page *page, unsigned int order, bool init) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 4b5e5a3d3a63..6aff49f6b79e 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -603,14 +603,6 @@ static unsigned long kfence_init_pool(void) addr += 2 * PAGE_SIZE; } - /* - * The pool is live and will never be deallocated from this point on. - * Remove the pool object from the kmemleak object tree, as it would - * otherwise overlap with allocations returned by kfence_alloc(), which - * are registered with kmemleak through the slab post-alloc hook. - */ - kmemleak_free(__kfence_pool); - return 0; } @@ -623,8 +615,16 @@ static bool __init kfence_init_pool_early(void) addr = kfence_init_pool(); - if (!addr) + if (!addr) { + /* + * The pool is live and will never be deallocated from this point on. 
+ * Ignore the pool object from the kmemleak phys object tree, as it would + * otherwise overlap with allocations returned by kfence_alloc(), which + * are registered with kmemleak through the slab post-alloc hook. + */ + kmemleak_ignore_phys(__pa(__kfence_pool)); return true; + } /* * Only release unprotected pages, and do not try to go back and change @@ -712,7 +712,7 @@ again: * however, it might mean that the page is under page_ref_freeze(). * The __remove_mapping() case is easy, again the node is now stale; * the same is in reuse_ksm_page() case; but if page is swapcache - * in migrate_page_move_mapping(), it might still be our page, + * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. */ while (!get_page_unless_zero(page)) { diff --git a/mm/memblock.c b/mm/memblock.c index e4f03a6e8e56..a9f18b988b7f 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -327,7 +327,7 @@ again: NUMA_NO_NODE, flags); if (!ret && (flags & MEMBLOCK_MIRROR)) { - pr_warn("Could not allocate %pap bytes of mirrored memory\n", + pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", &size); flags &= ~MEMBLOCK_MIRROR; goto again; @@ -924,6 +924,9 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) */ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) { + if (!mirrored_kernelcore) + return 0; + system_has_some_mirror = true; return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); @@ -1384,7 +1387,7 @@ again: if (flags & MEMBLOCK_MIRROR) { flags &= ~MEMBLOCK_MIRROR; - pr_warn("Could not allocate %pap bytes of mirrored memory\n", + pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", &size); goto again; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index da39ec8afca8..b864c2eff641 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1940,7 +1940,7 @@ try_again: /* * Now take care of user space mappings. - * Abort on fail: __delete_from_page_cache() assumes unmapped page. + * Abort on fail: __filemap_remove_folio() assumes unmapped page. */ if (!hwpoison_user_mappings(p, pfn, flags, p)) { action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); diff --git a/mm/memory.c b/mm/memory.c index 4cf7d4b6c950..1c6027adc542 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3043,7 +3043,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); - VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page)); + VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page)); /* * Clear the pages cpupid information as the existing @@ -4369,9 +4369,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return VM_FAULT_OOM; } - /* See comment in handle_pte_fault() */ + /* + * See comment in handle_pte_fault( |
