author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-05-26 12:32:41 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-05-26 12:32:41 -0700 |
commit | 98931dd95fd489fcbfa97da563505a6f071d7c77 (patch) | |
tree | 44683fc4a92efa614acdca2742a7ff19d26da1e3 /include | |
parent | df202b452fe6c6d6f1351bad485e2367ef1e644e (diff) | |
parent | f403f22f8ccb12860b2b62fec3173c6ccd45938b (diff) | |
Merge tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Almost all of MM here. A few things are still getting finished off,
reviewed, etc.
- Yang Shi has improved the behaviour of khugepaged collapsing of
readonly file-backed transparent hugepages.
- Johannes Weiner has arranged for zswap memory use to be tracked and
managed on a per-cgroup basis.
- Muchun Song adds a /proc knob ("hugetlb_optimize_vmemmap") for
runtime enablement of the recent huge page vmemmap optimization
feature.
- Baolin Wang contributes a series to fix some issues around hugetlb
pagetable invalidation.
- Zhenwei Pi has fixed some interactions between hwpoisoned pages and
virtualization.
- Tong Tiangen has enabled the use of the presently x86-only
page_table_check debugging feature on arm64 and riscv.
- David Vernet has done some fixup work on the memcg selftests.
- Peter Xu has taught userfaultfd to handle write protection faults
against shmem- and hugetlbfs-backed files.
- More DAMON development from SeongJae Park - adding online tuning of
the feature and support for monitoring of fixed virtual address
ranges. Also easier discovery of which monitoring operations are
available.
- Nadav Amit has done some optimization of TLB flushing during
mprotect().
- Neil Brown continues to labor away at improving our swap-over-NFS
support.
- David Hildenbrand has some fixes to anon page COWing versus
get_user_pages().
- Peng Liu fixed some errors in the core hugetlb code.
- Joao Martins has reduced the amount of memory consumed by
device-dax's compound devmaps.
- Some cleanups of the arch-specific pagemap code from Anshuman
Khandual.
- Muchun Song has found and fixed some errors in the TLB flushing of
transparent hugepages.
- Roman Gushchin has done more work on the memcg selftests.
... and, of course, many smaller fixes and cleanups. Notably, the
customary million cleanup serieses from Miaohe Lin"
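The userfaultfd write-protection support for shmem- and hugetlbfs-backed files mentioned above can be exercised from userspace roughly as in the following sketch. This is not taken from the series itself: error handling is trimmed, the memfd name is illustrative, and a kernel without this work would simply fail the UFFDIO_REGISTER step for a shmem mapping.

/*
 * Minimal sketch: register a shmem (memfd) mapping for userfaultfd
 * write-protect mode and write-protect one page.  Illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int memfd = memfd_create("uffd-wp-demo", 0);	/* shmem-backed file */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	char *area;

	ftruncate(memfd, page);
	area = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, memfd, 0);

	/* Handshake: ask for write-protect page fault reporting. */
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP,
	};
	ioctl(uffd, UFFDIO_API, &api);

	/* Register the shmem range in write-protect mode. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Mark the page write-protected. */
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	printf("registered %p for uffd-wp\n", area);
	return 0;
}

A later write to the page raises a userfault event with UFFD_PAGEFAULT_FLAG_WP set, which a monitor thread resolves by issuing another UFFDIO_WRITEPROTECT with mode 0.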
* tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (381 commits)
mm: kfence: use PAGE_ALIGNED helper
selftests: vm: add the "settings" file with timeout variable
selftests: vm: add "test_hmm.sh" to TEST_FILES
selftests: vm: check numa_available() before operating "merge_across_nodes" in ksm_tests
selftests: vm: add migration to the .gitignore
selftests/vm/pkeys: fix typo in comment
ksm: fix typo in comment
selftests: vm: add process_mrelease tests
Revert "mm/vmscan: never demote for memcg reclaim"
mm/kfence: print disabling or re-enabling message
include/trace/events/percpu.h: cleanup for "percpu: improve percpu_alloc_percpu event trace"
include/trace/events/mmflags.h: cleanup for "tracing: incorrect gfp_t conversion"
mm: fix a potential infinite loop in start_isolate_page_range()
MAINTAINERS: add Muchun as co-maintainer for HugeTLB
zram: fix Kconfig dependency warning
mm/shmem: fix shmem folio swapoff hang
cgroup: fix an error handling path in alloc_pagecache_max_30M()
mm: damon: use HPAGE_PMD_SIZE
tracing: incorrect isolate_mode_t cast in mm_vmscan_lru_isolate
nodemask.h: fix compilation error with GCC12
...
Diffstat (limited to 'include')
48 files changed, 1061 insertions, 418 deletions
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 8e1e6244a89d..a57d667addd2 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -2,6 +2,9 @@
 #ifndef _ASM_GENERIC_HUGETLB_H
 #define _ASM_GENERIC_HUGETLB_H
 
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
 static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
 {
 	return mk_pte(page, pgprot);
@@ -32,6 +35,21 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 	return pte_modify(pte, newprot);
 }
 
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
+{
+	return pte_mkuffd_wp(pte);
+}
+
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+	return pte_clear_uffd_wp(pte);
+}
+
+static inline int huge_pte_uffd_wp(pte_t pte)
+{
+	return pte_uffd_wp(pte);
+}
+
 #ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 		pte_t *ptep, unsigned long sz)
@@ -66,10 +84,10 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 #endif
 
 #ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 		unsigned long addr, pte_t *ptep)
 {
-	ptep_clear_flush(vma, addr, ptep);
+	return ptep_clear_flush(vma, addr, ptep);
 }
 #endif
 
@@ -80,6 +98,12 @@ static inline int huge_pte_none(pte_t pte)
 }
 #endif
 
+/* Please refer to comments above pte_none_mostly() for the usage */
+static inline int huge_pte_none_mostly(pte_t pte)
+{
+	return huge_pte_none(pte) || is_pte_marker(pte);
+}
+
 #ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
 static inline pte_t huge_pte_wrprotect(pte_t pte)
 {
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 10789cf51d16..8ffd64e7a24c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t;
 static inline int pud_none(pud_t pud)		{ return 0; }
 static inline int pud_bad(pud_t pud)		{ return 0; }
 static inline int pud_present(pud_t pud)	{ return 1; }
+static inline int pud_user(pud_t pud)		{ return 0; }
+static inline int pud_leaf(pud_t pud)		{ return 0; }
 static inline void pud_clear(pud_t *pud)	{ }
 #define pmd_ERROR(pmd)				(pud_ERROR((pmd).pud))
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index eee6f7763a39..ff3e82553a76 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -658,6 +658,20 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
 	} while (0)
 #endif
 
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+	return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+	return true;
+}
+#endif
+
 #endif /* CONFIG_MMU */
 
 #endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index a6f637342740..63873b93deaa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -17,11 +17,11 @@
 #define CMA_MAX_NAME 64
 
 /*
- * TODO: once the buddy -- especially pageblock merging and alloc_contig_range()
+ *  the buddy -- especially pageblock merging and alloc_contig_range()
  * -- can deal with only some pageblocks of a higher-order page being
  *  MIGRATE_CMA, we can use pageblock_nr_pages.
  */
-#define CMA_MIN_ALIGNMENT_PAGES MAX_ORDER_NR_PAGES
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
 #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
 
 struct cma;
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 34bce35c808d..52a9ff65faee 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -177,7 +177,7 @@ static inline bool compaction_withdrawn(enum compact_result result)
 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 					int alloc_flags);
 
-extern int kcompactd_run(int nid);
+extern void kcompactd_run(int nid);
 extern void kcompactd_stop(int nid);
 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
 
@@ -212,9 +212,8 @@ static inline bool compaction_withdrawn(enum compact_result result)
 	return true;
 }
 
-static inline int kcompactd_run(int nid)
+static inline void kcompactd_run(int nid)
 {
-	return 0;
 }
 static inline void kcompactd_stop(int nid)
 {
diff --git a/include/linux/damon.h b/include/linux/damon.h
index f23cbfa4248d..7c62da31ce4b 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -261,10 +261,14 @@ struct damos {
 * enum damon_ops_id - Identifier for each monitoring operations implementation
 *
 * @DAMON_OPS_VADDR:	Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR:	Monitoring operations for only fixed ranges of virtual
+ *			address spaces
 * @DAMON_OPS_PADDR:	Monitoring operations for the physical address space
+ * @NR_DAMON_OPS:	Number of monitoring operations implementations
 */
 enum damon_ops_id {
 	DAMON_OPS_VADDR,
+	DAMON_OPS_FVADDR,
 	DAMON_OPS_PADDR,
 	NR_DAMON_OPS,
 };
@@ -340,6 +344,7 @@ struct damon_operations {
 * struct damon_callback - Monitoring events notification callbacks.
 *
 * @before_start:	Called before starting the monitoring.
+ * @after_wmarks_check:	Called after each schemes' watermarks check.
 * @after_sampling:	Called after each sampling.
 * @after_aggregation:	Called after each aggregation.
 * @before_terminate:	Called before terminating the monitoring.
@@ -350,6 +355,11 @@ struct damon_operations {
 * respectively.  Therefore, those are good places for installing and cleaning
 * @private.
 *
+ * The monitoring thread calls @after_wmarks_check after each DAMON-based
+ * operation schemes' watermarks check.  If users need to make changes to the
+ * attributes of the monitoring context while it's deactivated due to the
+ * watermarks, this is the good place to do.
+ *
 * The monitoring thread calls @after_sampling and @after_aggregation for each
 * of the sampling intervals and aggregation intervals, respectively.
 * Therefore, users can safely access the monitoring results without additional
@@ -362,6 +372,7 @@ struct damon_callback {
 	void *private;
 
 	int (*before_start)(struct damon_ctx *context);
+	int (*after_wmarks_check)(struct damon_ctx *context);
 	int (*after_sampling)(struct damon_ctx *context);
 	int (*after_aggregation)(struct damon_ctx *context);
 	void (*before_terminate)(struct damon_ctx *context);
@@ -484,6 +495,8 @@ static inline void damon_insert_region(struct damon_region *r,
 void damon_add_region(struct damon_region *r, struct damon_target *t);
 void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+		unsigned int nr_ranges);
 
 struct damos *damon_new_scheme(
 		unsigned long min_sz_region, unsigned long max_sz_region,
@@ -509,6 +522,7 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
 int damon_set_schemes(struct damon_ctx *ctx,
 			struct damos **schemes, ssize_t nr_schemes);
 int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
 int damon_register_ops(struct damon_operations *ops);
 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 2d04f6448cde..9f6e25467844 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,6 +20,7 @@ struct fault_attr {
 	atomic_t space;
 	unsigned long verbose;
 	bool task_filter;
+	bool no_warn;
 	unsigned long stacktrace_depth;
 	unsigned long require_start;
 	unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
 		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
 		.verbose = 2,						\
 		.dname = NULL,						\
+		.no_warn = false,					\
 	}
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f58ae40235ab..1225f8c75650 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -162,6 +162,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 /* File is stream-like */
 #define FMODE_STREAM		((__force fmode_t)0x200000)
 
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT	((__force fmode_t)0x400000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x4000000)
 
@@ -376,6 +379,7 @@ struct address_space_operations {
 	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
 				sector_t *span);
 	void (*swap_deactivate)(struct file *file);
+	int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
 };
 
 extern const struct address_space_operations empty_aops;
@@ -457,6 +461,11 @@ static inline void i_mmap_unlock_write(struct address_space *mapping)
 	up_write(&mapping->i_mmap_rwsem);
 }
 
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+	return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
 static inline void i_mmap_lock_read(struct address_space *mapping)
 {
 	down_read(&mapping->i_mmap_rwsem);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3e3d36fc2109..2d2ccae933c2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -367,7 +367,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
@@ -613,13 +613,8 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr,
-			bool hugepage);
 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
@@ -629,16 +624,17 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
-	alloc_pages(gfp_mask, order)
 #define vma_alloc_folio(gfp, order, vma, addr, hugepage)		\
 	folio_alloc(gfp, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+		struct vm_area_struct *vma, unsigned long addr)
+{
+	struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+	return &folio->page;
+}
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 337bd9f32921..cddb42ff0473 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -246,9 +246,21 @@ static inline bool is_kmap_addr(const void *x)
 
 #endif /* CONFIG_HIGHMEM */
 
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr:	Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on PREEMP_RT configuration, re-enables also
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
 */
 #define kunmap_atomic(__addr)					\
 do {								\
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 39bb9b47fa9c..3af34de54330 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -37,7 +37,7 @@ static inline void *kmap(struct page *page);
 
 /**
 * kunmap - Unmap the virtual address mapped by kmap()
- * @addr: Virtual address to be unmapped
+ * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
@@ -138,23 +138,46 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
 *
 * Returns: The virtual address of the mapping
 *
- * Effectively a wrapper around kmap_local_page() which disables pagefaults
- * and preemption.
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on PREEMPT_RT configuration, also CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
- */
-static inline void *kmap_atomic(struct page *page);
-
-/**
- * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
- * @addr: Virtual address to be unmapped
 *
- * Counterpart to kmap_atomic().
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following:
+ *
+ * -- Find the page of interest. --
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * -- Gain access to the contents of that page. --
+ * void *vaddr = kmap_atomic(page);
+ *
+ * -- Do something to the contents of that page. --
+ * memset(vaddr, 0, PAGE_SIZE);
+ *
+ * -- Unmap that page. --
+ * kunmap_atomic(vaddr);
 *
- * Effectively a wrapper around kunmap_local() which additionally undoes
- * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
- * preemption.
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another you need to keep the kmap_atomic calls strictly nested, like:
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
 */
+static inline void *kmap_atomic(struct page *page);
 
 /* Highmem related interfaces for management code */
 static inline unsigned int nr_free_highpages(void);
@@ -191,6 +214,8 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
+ * Returns: The allocated and zeroed HIGHMEM page
+ *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed
 *
@@ -358,6 +383,8 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
 static inline void memzero_page(struct page *page, size_t offset, size_t len)
 {
 	char *addr = kmap_local_page(page);
+
+	VM_BUG_ON(offset + len > PAGE_SIZE);
 	memset(addr + offset, 0, len);
 	flush_dcache_page(page);
 	kunmap_local(addr);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2999190adc22..de29821231c9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -36,8 +36,9 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
 		 unsigned long addr);
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
-int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
-		    pgprot_t newprot, unsigned long cp_flags);
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+		    unsigned long cp_flags);
 vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
 				   pgprot_t pgprot, bool write);
 
@@ -172,6 +173,20 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 	return false;
 }
 
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+	struct inode *inode;
+
+	if (!vma->vm_file)
+		return false;
+
+	inode = vma->vm_file->f_inode;
+
+	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
+	       (vma->vm_flags & VM_EXEC) &&
+	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
+}
+
 bool transparent_hugepage_active(struct vm_area_struct *vma);
 
 #define transparent_hugepage_use_zero_page()				\
@@ -347,7 +362,6 @@ static inline void prep_transhuge_page(struct page *page) {}
 static inline bool
 can_split_folio(struct folio *folio, int *pextra_pins)
 {
-	BUILD_BUG();
 	return false;
 }
 static inline int
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ac2a1d758a80..0f2894d01333 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -137,17 +137,19 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 			     struct vm_area_struct *new_vma,
 			     unsigned long old_addr, unsigned long new_addr,
 			     unsigned long len);
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+			    struct vm_area_struct *, struct vm_area_struct *);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			 struct page **, struct vm_area_struct **,
 			 unsigned long *, unsigned long *, long, unsigned int,
 			 int *);
 void unmap_hugepage_range(struct vm_area_struct *,
-			  unsigned long, unsigned long, struct page *);
+			  unsigned long, unsigned long, struct page *,
+			  zap_flags_t);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
-			  struct page *ref_page);
+			  struct page *ref_page, zap_flags_t zap_flags);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo(void);
@@ -160,7 +162,8 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 				unsigned long dst_addr,
 				unsigned long src_addr,
 				enum mcopy_atomic_mode mode,
-				struct page **pagep);
+				struct page **pagep,
+				bool wp_copy);
 #endif /* CONFIG_USERFAULTFD */
 bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
@@ -210,7 +213,8 @@ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-		unsigned long address, unsigned long end, pgprot_t newprot);
+		unsigned long address, unsigned long end, pgprot_t newprot,
+		unsigned long cp_flags);
 
 bool is_hugetlb_entry_migration(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
@@ -266,7 +270,9 @@ static inline struct page *follow_huge_addr(struct mm_struct *mm,
 }
 
 static inline int copy_hugetlb_page_range(struct mm_struct *dst,
-			struct mm_struct *src, struct vm_area_struct *vma)
+					  struct mm_struct *src,
+					  struct vm_area_struct *dst_vma,
+					  struct vm_area_struct *src_vma)
 {
 	BUG();
 	return 0;
@@ -356,7 +362,8 @@ static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 						unsigned long dst_addr,
 						unsigned long src_addr,
 						enum mcopy_atomic_mode mode,
-						struct page **pagep)
+						struct page **pagep,
+						bool wp_copy)
 {
 	BUG();
 	return 0;
@@ -395,14 +402,16 @@ static inline void move_hugetlb_state(struct page *oldpage,
 
 static inline unsigned long hugetlb_change_protection(
 			struct vm_area_struct *vma, unsigned long address,
-			unsigned long end, pgprot_t newprot)
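For reference, a minimal kernel-side sketch (not part of this diff) of the kmap_local_page()/kunmap_local() pattern that the updated highmem kernel-doc above steers new code toward in place of kmap_atomic(); the helper name copy_page_contents_local() is made up for illustration.

/*
 * Illustrative only -- this helper is not in the tree; it mirrors the
 * nesting rule documented for kmap_atomic()/kmap_local_page() above:
 * unmap in the reverse order of mapping.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_contents_local(struct page *dst, struct page *src)
{
	void *vdst = kmap_local_page(dst);
	void *vsrc = kmap_local_page(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	/* Local kmaps must stay strictly nested. */
	kunmap_local(vsrc);
	kunmap_local(vdst);
}

In-tree code would normally just call copy_highpage() for this; spelling it out only shows the mapping discipline.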