Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig               |   8
-rw-r--r--  mm/balloon_compaction.c  |  10
-rw-r--r--  mm/compaction.c          |  34
-rw-r--r--  mm/damon/vaddr.c         |   3
-rw-r--r--  mm/filemap.c             | 143
-rw-r--r--  mm/folio-compat.c        |  22
-rw-r--r--  mm/gup.c                 |   6
-rw-r--r--  mm/highmem.c             |   2
-rw-r--r--  mm/hmm.c                 |  19
-rw-r--r--  mm/huge_memory.c         |  11
-rw-r--r--  mm/hugetlb.c             |  24
-rw-r--r--  mm/internal.h            |   2
-rw-r--r--  mm/ioremap.c             |  26
-rw-r--r--  mm/kasan/common.c        |   3
-rw-r--r--  mm/kasan/shadow.c        |  29
-rw-r--r--  mm/kfence/core.c         |  18
-rw-r--r--  mm/ksm.c                 |   2
-rw-r--r--  mm/memblock.c            |   7
-rw-r--r--  mm/memory-failure.c      |   2
-rw-r--r--  mm/memory.c              |  36
-rw-r--r--  mm/memremap.c            |   6
-rw-r--r--  mm/migrate.c             | 238
-rw-r--r--  mm/migrate_device.c      |   3
-rw-r--r--  mm/page-writeback.c      |  89
-rw-r--r--  mm/page_alloc.c          |  33
-rw-r--r--  mm/rmap.c                |  27
-rw-r--r--  mm/secretmem.c           |  55
-rw-r--r--  mm/shmem.c               |  26
-rw-r--r--  mm/slab.c                |  20
-rw-r--r--  mm/slab.h                |  39
-rw-r--r--  mm/slab_common.c         |  36
-rw-r--r--  mm/slob.c                |  33
-rw-r--r--  mm/slub.c                |  98
-rw-r--r--  mm/sparse-vmemmap.c      |  10
-rw-r--r--  mm/swap.c                |  29
-rw-r--r--  mm/swap_slots.c          |   2
-rw-r--r--  mm/swap_state.c          |   4
-rw-r--r--  mm/truncate.c            |   2
-rw-r--r--  mm/usercopy.c            |   2
-rw-r--r--  mm/userfaultfd.c         |   5
-rw-r--r--  mm/util.c                |   4
-rw-r--r--  mm/vmscan.c              |  56
-rw-r--r--  mm/z3fold.c              |  84
-rw-r--r--  mm/zsmalloc.c            | 102
44 files changed, 656 insertions(+), 754 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index f73f5b272144..e59cf5fe5ce9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -639,14 +639,6 @@ config BOUNCE
memory available to the CPU. Enabled by default when HIGHMEM is
selected, but you may say n to override this.
-config VIRT_TO_BUS
- bool
- help
- An architecture should select this if it implements the
- deprecated interface virt_to_bus(). All new architectures
- should probably not select this.
-
-
config MMU_NOTIFIER
bool
select SRCU
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 4b8eab4b3f45..22c96fed70b5 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -228,10 +228,8 @@ static void balloon_page_putback(struct page *page)
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
-
/* move_to_new_page() counterpart for a ballooned page */
-static int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage, struct page *page,
+static int balloon_page_migrate(struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct balloon_dev_info *balloon = balloon_page_device(page);
@@ -250,11 +248,11 @@ static int balloon_page_migrate(struct address_space *mapping,
return balloon->migratepage(balloon, newpage, page, mode);
}
-const struct address_space_operations balloon_aops = {
- .migratepage = balloon_page_migrate,
+const struct movable_operations balloon_mops = {
+ .migrate_page = balloon_page_migrate,
.isolate_page = balloon_page_isolate,
.putback_page = balloon_page_putback,
};
-EXPORT_SYMBOL_GPL(balloon_aops);
+EXPORT_SYMBOL_GPL(balloon_mops);
#endif /* CONFIG_BALLOON_COMPACTION */
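[Editor's note: the balloon conversion above is the template for the other PageMovable users in this series: instead of abusing address_space_operations, a driver now describes its movable pages with a movable_operations table and registers it with __SetPageMovable(). A minimal driver-side sketch follows; only the struct fields and the __SetPageMovable() signature are taken from this patch, the mydrv_* helpers are hypothetical.]

static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* detach the page from the driver's internal lists */
	return true;
}

static int mydrv_migrate_page(struct page *dst, struct page *src,
			      enum migrate_mode mode)
{
	/* copy contents and driver metadata from src to dst */
	return MIGRATEPAGE_SUCCESS;
}

static void mydrv_putback_page(struct page *page)
{
	/* isolation was aborted: put the page back on the driver's lists */
}

static const struct movable_operations mydrv_mops = {
	.isolate_page	= mydrv_isolate_page,
	.migrate_page	= mydrv_migrate_page,
	.putback_page	= mydrv_putback_page,
};

static void mydrv_make_movable(struct page *page)
{
	lock_page(page);	/* __SetPageMovable() requires the page lock */
	__SetPageMovable(page, &mydrv_mops);
	unlock_page(page);
}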
diff --git a/mm/compaction.c b/mm/compaction.c
index d024d18e0b5c..640fa76228dd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -110,28 +110,27 @@ static void split_map_pages(struct list_head *list)
}
#ifdef CONFIG_COMPACTION
-
-int PageMovable(struct page *page)
+bool PageMovable(struct page *page)
{
- struct address_space *mapping;
+ const struct movable_operations *mops;
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!__PageMovable(page))
- return 0;
+ return false;
- mapping = page_mapping(page);
- if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
- return 1;
+ mops = page_movable_ops(page);
+ if (mops)
+ return true;
- return 0;
+ return false;
}
EXPORT_SYMBOL(PageMovable);
-void __SetPageMovable(struct page *page, struct address_space *mapping)
+void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
- page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+ VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
+ page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);
@@ -139,12 +138,10 @@ void __ClearPageMovable(struct page *page)
{
VM_BUG_ON_PAGE(!PageMovable(page), page);
/*
- * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
- * flag so that VM can catch up released page by driver after isolation.
- * With it, VM migration doesn't try to put it back.
+ * This page still has the type of a movable page, but it's
+ * actually not movable any more.
*/
- page->mapping = (void *)((unsigned long)page->mapping &
- PAGE_MAPPING_MOVABLE);
+ page->mapping = (void *)PAGE_MAPPING_MOVABLE;
}
EXPORT_SYMBOL(__ClearPageMovable);
@@ -1035,7 +1032,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Only pages without mappings or that have a
- * ->migratepage callback are possible to migrate
+ * ->migrate_folio callback are possible to migrate
* without blocking. However, we can be racing with
* truncation so it's necessary to lock the page
* to stabilise the mapping as truncation holds
@@ -1046,7 +1043,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
goto isolate_fail_put;
mapping = page_mapping(page);
- migrate_dirty = !mapping || mapping->a_ops->migratepage;
+ migrate_dirty = !mapping ||
+ mapping->a_ops->migrate_folio;
unlock_page(page);
if (!migrate_dirty)
goto isolate_fail_put;
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 59e1653799f8..3c7b9d6dca95 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -336,8 +336,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
if (pte_young(entry)) {
referenced = true;
entry = pte_mkold(entry);
- huge_ptep_set_access_flags(vma, addr, pte, entry,
- vma->vm_flags & VM_WRITE);
+ set_huge_pte_at(mm, addr, pte, entry);
}
#ifdef CONFIG_MMU_NOTIFIER
diff --git a/mm/filemap.c b/mm/filemap.c
index cd59f055e29d..15800334147b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -923,26 +923,6 @@ error:
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page: page to add
- * @mapping: the page's address_space
- * @offset: page index
- * @gfp_mask: page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU. The caller must do that.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
-{
- return __filemap_add_folio(mapping, page_folio(page), offset,
- gfp_mask, NULL);
-}
-EXPORT_SYMBOL(add_to_page_cache_locked);
-
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp)
{
@@ -1982,6 +1962,10 @@ no_page:
gfp |= __GFP_WRITE;
if (fgp_flags & FGP_NOFS)
gfp &= ~__GFP_FS;
+ if (fgp_flags & FGP_NOWAIT) {
+ gfp &= ~GFP_KERNEL;
+ gfp |= GFP_NOWAIT | __GFP_NOWARN;
+ }
folio = filemap_alloc_folio(gfp, 0);
if (!folio)
@@ -2141,65 +2125,46 @@ put:
return folio_batch_count(fbatch);
}
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
- if (!folio_test_large(folio) || folio_test_hugetlb(folio))
- return false;
- if (index >= max)
- return false;
- return index < folio->index + folio_nr_pages(folio) - 1;
-}
-
/**
- * find_get_pages_range - gang pagecache lookup
+ * filemap_get_folios - Get a batch of folios
* @mapping: The address_space to search
* @start: The starting page index
* @end: The final page index (inclusive)
- * @nr_pages: The maximum number of pages
- * @pages: Where the resulting pages are placed
+ * @fbatch: The batch to fill.
*
- * find_get_pages_range() will search for and return a group of up to @nr_pages
- * pages in the mapping starting at index @start and up to index @end
- * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
- * a reference against the returned pages.
+ * Search for and return a batch of folios in the mapping starting at
+ * index @start and up to index @end (inclusive). The folios are returned
+ * in @fbatch with an elevated reference count.
*
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes. There may be holes in the indices due to not-present pages.
- * We also update @start to index the next page for the traversal.
+ * The first folio may start before @start; if it does, it will contain
+ * @start. The final folio may extend beyond @end; if it does, it will
+ * contain @end. The folios have ascending indices. There may be gaps
+ * between the folios if there are indices which have no folio in the
+ * page cache. If folios are added to or removed from the page cache
+ * while this is running, they may or may not be found by this call.
*
- * Return: the number of pages which were found. If this number is
- * smaller than @nr_pages, the end of specified range has been
- * reached.
+ * Return: The number of folios which were found.
+ * We also update @start to index the next folio for the traversal.
*/
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
- pgoff_t end, unsigned int nr_pages,
- struct page **pages)
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch)
{
XA_STATE(xas, &mapping->i_pages, *start);
struct folio *folio;
- unsigned ret = 0;
-
- if (unlikely(!nr_pages))
- return 0;
rcu_read_lock();
- while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+ while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
/* Skip over shadow, swap and DAX entries */
if (xa_is_value(folio))
continue;
+ if (!folio_batch_add(fbatch, folio)) {
+ unsigned long nr = folio_nr_pages(folio);
-again:
- pages[ret] = folio_file_page(folio, xas.xa_index);
- if (++ret == nr_pages) {
- *start = xas.xa_index + 1;
+ if (folio_test_hugetlb(folio))
+ nr = 1;
+ *start = folio->index + nr;
goto out;
}
- if (folio_more_pages(folio, xas.xa_index, end)) {
- xas.xa_index++;
- folio_ref_inc(folio);
- goto again;
- }
}
/*
@@ -2215,7 +2180,18 @@ again:
out:
rcu_read_unlock();
- return ret;
+ return folio_batch_count(fbatch);
+}
+EXPORT_SYMBOL(filemap_get_folios);
+
+static inline
+bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
+{
+ if (!folio_test_large(folio) || folio_test_hugetlb(folio))
+ return false;
+ if (index >= max)
+ return false;
+ return index < folio->index + folio_nr_pages(folio) - 1;
}
/**
@@ -2403,7 +2379,7 @@ retry:
rcu_read_unlock();
}
-static int filemap_read_folio(struct file *file, struct address_space *mapping,
+static int filemap_read_folio(struct file *file, filler_t filler,
struct folio *folio)
{
int error;
@@ -2415,7 +2391,7 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
*/
folio_clear_error(folio);
/* Start the actual read. The read will unlock the page. */
- error = mapping->a_ops->read_folio(file, folio);
+ error = filler(file, folio);
if (error)
return error;
@@ -2424,7 +2400,8 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
return error;
if (folio_test_uptodate(folio))
return 0;
- shrink_readahead_size_eio(&file->f_ra);
+ if (file)
+ shrink_readahead_size_eio(&file->f_ra);
return -EIO;
}
@@ -2497,7 +2474,8 @@ static int filemap_update_page(struct kiocb *iocb,
if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
goto unlock;
- error = filemap_read_folio(iocb->ki_filp, mapping, folio);
+ error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
+ folio);
goto unlock_mapping;
unlock:
folio_unlock(folio);
@@ -2540,7 +2518,7 @@ static int filemap_create_folio(struct file *file,
if (error)
goto error;
- error = filemap_read_folio(file, mapping, folio);
+ error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
if (error)
goto error;
@@ -3224,7 +3202,7 @@ page_not_uptodate:
* and we need to check for errors.
*/
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
- error = filemap_read_folio(file, mapping, folio);
+ error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
if (fpin)
goto out_retry;
folio_put(folio);
@@ -3514,20 +3492,7 @@ repeat:
return ERR_PTR(err);
}
-filler:
- err = filler(file, folio);
- if (err < 0) {
- folio_put(folio);
- return ERR_PTR(err);
- }
-
- folio_wait_locked(folio);
- if (!folio_test_uptodate(folio)) {
- folio_put(folio);
- return ERR_PTR(-EIO);
- }
-
- goto out;
+ goto filler;
}
if (folio_test_uptodate(folio))
goto out;
@@ -3550,14 +3515,14 @@ filler:
goto out;
}
- /*
- * A previous I/O error may have been due to temporary
- * failures.
- * Clear page error before actual read, PG_error will be
- * set again if read page fails.
- */
- folio_clear_error(folio);
- goto filler;
+filler:
+ err = filemap_read_folio(file, filler, folio);
+ if (err) {
+ folio_put(folio);
+ if (err == AOP_TRUNCATED_PAGE)
+ goto repeat;
+ return ERR_PTR(err);
+ }
out:
folio_mark_accessed(folio);
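[Editor's note: callers being converted away from find_get_pages_range() pair the new filemap_get_folios() with a folio_batch. A sketch of the expected calling pattern, not taken from this patch; the loop body is illustrative.]

	struct folio_batch fbatch;
	pgoff_t index = start;		/* advanced by filemap_get_folios() */
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* ... per-folio work; the batch holds a reference ... */
		}
		folio_batch_release(&fbatch);	/* drop the references */
		cond_resched();
	}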
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 20bc15b57d93..458618c7302c 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -51,28 +51,6 @@ void mark_page_accessed(struct page *page)
}
EXPORT_SYMBOL(mark_page_accessed);
-#ifdef CONFIG_MIGRATION
-int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page, int extra_count)
-{
- return folio_migrate_mapping(mapping, page_folio(newpage),
- page_folio(page), extra_count);
-}
-EXPORT_SYMBOL(migrate_page_move_mapping);
-
-void migrate_page_states(struct page *newpage, struct page *page)
-{
- folio_migrate_flags(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_states);
-
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
- folio_migrate_copy(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_copy);
-#endif
-
bool set_page_writeback(struct page *page)
{
return folio_start_writeback(page_folio(page));
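[Editor's note: with the migrate_page_move_mapping()/migrate_page_states()/migrate_page_copy() compat wrappers removed, remaining callers are expected to use the folio interfaces directly, roughly as below (illustrative, mirroring the deleted wrappers).]

	/* old:  migrate_page_copy(newpage, page); */
	folio_migrate_copy(page_folio(newpage), page_folio(page));

	/* old:  rc = migrate_page_move_mapping(mapping, newpage, page, 0); */
	rc = folio_migrate_mapping(mapping, page_folio(newpage),
				   page_folio(page), 0);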
diff --git a/mm/gup.c b/mm/gup.c
index c6d060dee9e0..732825157430 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,7 +87,8 @@ retry:
* belongs to this folio.
*/
if (unlikely(page_folio(page) != folio)) {
- folio_put_refs(folio, refs);
+ if (!put_devmap_managed_page_refs(&folio->page, refs))
+ folio_put_refs(folio, refs);
goto retry;
}
@@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
refs *= GUP_PIN_COUNTING_BIAS;
}
- folio_put_refs(folio, refs);
+ if (!put_devmap_managed_page_refs(&folio->page, refs))
+ folio_put_refs(folio, refs);
}
/**
diff --git a/mm/highmem.c b/mm/highmem.c
index e92a7ceb30e8..c707d7202d5f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -561,7 +561,7 @@ void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(__kmap_local_page_prot);
-void kunmap_local_indexed(void *vaddr)
+void kunmap_local_indexed(const void *vaddr)
{
unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
pte_t *kmap_pte;
diff --git a/mm/hmm.c b/mm/hmm.c
index 3fd3242c5e50..f2aa63b94d9b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -212,14 +212,6 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline bool hmm_is_device_private_entry(struct hmm_range *range,
- swp_entry_t entry)
-{
- return is_device_private_entry(entry) &&
- pfn_swap_entry_to_page(entry)->pgmap->owner ==
- range->dev_private_owner;
-}
-
static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
pte_t pte)
{
@@ -252,10 +244,12 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
swp_entry_t entry = pte_to_swp_entry(pte);
/*
- * Never fault in device private pages, but just report
- * the PFN even if not present.
+ * Don't fault in device private pages owned by the caller,
+ * just report the PFN.
*/
- if (hmm_is_device_private_entry(range, entry)) {
+ if (is_device_private_entry(entry) &&
+ pfn_swap_entry_to_page(entry)->pgmap->owner ==
+ range->dev_private_owner) {
cpu_flags = HMM_PFN_VALID;
if (is_writable_device_private_entry(entry))
cpu_flags |= HMM_PFN_WRITE;
@@ -273,6 +267,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (!non_swap_entry(entry))
goto fault;
+ if (is_device_private_entry(entry))
+ goto fault;
+
if (is_device_exclusive_entry(entry))
goto fault;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 814020689d3e..8a7c1b344abe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -18,6 +18,7 @@
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
+#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
@@ -2485,11 +2486,15 @@ static void __split_huge_page(struct page *page, struct list_head *list,
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond EOF: drop them from page cache */
if (head[i].index >= end) {
- ClearPageDirty(head + i);
- __delete_from_page_cache(head + i, NULL);
+ struct folio *tail = page_folio(head + i);
+
if (shmem_mapping(head->mapping))
shmem_uncharge(head->mapping->host, 1);
- put_page(head + i);
+ else if (folio_test_clear_dirty(tail))
+ folio_account_cleaned(tail,
+ inode_to_wb(folio->mapping->host));
+ __filemap_remove_folio(tail, NULL);
+ folio_put(tail);
} else if (!PageAnon(page)) {
__xa_store(&head->mapping->i_pages, head[i].index,
head + i, 0);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb763f5d30b9..f044962ad9df 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4787,8 +4787,13 @@ again:
* sharing with another vma.
*/
;
- } else if (unlikely(is_hugetlb_entry_migration(entry) ||
- is_hugetlb_entry_hwpoisoned(entry))) {
+ } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+ bool uffd_wp = huge_pte_uffd_wp(entry);
+
+ if (!userfaultfd_wp(dst_vma) && uffd_wp)
+ entry = huge_pte_clear_uffd_wp(entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry);
+ } else if (unlikely(is_hugetlb_entry_migration(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
bool uffd_wp = huge_pte_uffd_wp(entry);
@@ -5417,19 +5422,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx)
{
+ struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
struct hstate *h = hstate_inode(inode);
- int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+ int err;
- if (err)
+ __folio_set_locked(folio);
+ err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+ if (unlikely(err)) {
+ __folio_clear_locked(folio);
return err;
+ }
ClearHPageRestoreReserve(page);
/*
- * set page dirty so that it will not be removed from cache/file
+ * mark folio dirty so that it will not be removed from cache/file
* by non-hugetlbfs specific code paths.
*/
- set_page_dirty(page);
+ folio_mark_dirty(folio);
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
@@ -5950,6 +5961,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page)) {
+ put_page(*pagep);
ret = -ENOMEM;
*pagep = NULL;
goto out;
diff --git a/mm/internal.h b/mm/internal.h
index caebaeb2e5c9..785409805ed7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -862,6 +862,8 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
+extern bool mirrored_kernelcore;
+
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
/*
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 5fe598ecd9b7..8652426282cc 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -11,29 +11,35 @@
#include <linux/io.h>
#include <linux/export.h>
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
{
unsigned long offset, vaddr;
phys_addr_t last_addr;
struct vm_struct *area;
/* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
return NULL;
/* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
+ offset = phys_addr & (~PAGE_MASK);
+ phys_addr -= offset;
size = PAGE_ALIGN(size + offset);
+ if (!ioremap_allowed(phys_addr, size, prot))
+ return NULL;
+
area = get_vm_area_caller(size, VM_IOREMAP,
__builtin_return_address(0));
if (!area)
return NULL;
vaddr = (unsigned long)area->addr;
+ area->phys_addr = phys_addr;
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+ __pgprot(prot))) {
free_vm_area(area);
return NULL;
}
@@ -44,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
void iounmap(volatile void __iomem *addr)
{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
+ void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+ if (!iounmap_allowed(vaddr))
+ return;
+
+ if (is_vmalloc_addr(vaddr))
+ vunmap(vaddr);
}
EXPORT_SYMBOL(iounmap);
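[Editor's note: the reworked generic ioremap defers to two architecture hooks, ioremap_allowed() and iounmap_allowed(). Their default definitions live outside mm/ and are not part of this diff; an architecture override would look roughly like the sketch below, with the bodies being illustrative only.]

#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	/* an architecture can veto the mapping here, e.g. reject normal RAM */
	return true;
}

#define iounmap_allowed iounmap_allowed
static inline bool iounmap_allowed(void *addr)
{
	/* an architecture can veto the unmap here */
	return true;
}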
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 707c3a527fcb..69f583855c8b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -108,9 +108,10 @@ void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
return;
tag = kasan_random_tag();
+ kasan_unpoison(set_tag(page_address(page), tag),
+ PAGE_SIZE << order, init);
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
- kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index a4f07de21771..0e3648b603a6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -295,9 +295,22 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
return 0;
shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
- shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
- shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+ /*
+ * User Mode Linux maps enough shadow memory for all of virtual memory
+ * at boot, so doesn't need to allocate more on vmalloc, just clear it.
+ *
+ * The remaining CONFIG_UML checks in this file exist for the same
+ * reason.
+ */
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
+ return 0;
+ }
+
+ shadow_start = PAGE_ALIGN_