summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorKefeng Wang <wangkefeng.wang@huawei.com>2024-10-10 14:15:56 +0800
committerAndrew Morton <akpm@linux-foundation.org>2024-11-06 20:11:12 -0800
commit6359c39c9de66dede8ff5ff257c9e117483dbc7c (patch)
treee557e4ae93a6c7fab15c7a1dc647d5f74dadd492 /mm
parentf8780515fe914ac03189213c7e485264d65e2ece (diff)
downloadlinux-6359c39c9de66dede8ff5ff257c9e117483dbc7c.tar.gz
linux-6359c39c9de66dede8ff5ff257c9e117483dbc7c.tar.bz2
linux-6359c39c9de66dede8ff5ff257c9e117483dbc7c.zip
mm: remove unused hugepage for vma_alloc_folio()
The hugepage parameter has been deprecated since commit ddc1a5cbc05d ("mempolicy: alloc_pages_mpol() for NUMA policy without vma"); for PMD-sized THP, vma_alloc_folio() still tries only the preferred node if possible, by checking the order of the folio allocation.

Link: https://lkml.kernel.org/r/20241010061556.1846751-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/memory.c10
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/userfaultfd.c2
5 files changed, 8 insertions, 11 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c674afd16245..387c046a389e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1143,7 +1143,7 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
const int order = HPAGE_PMD_ORDER;
struct folio *folio;
- folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK, true);
+ folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
if (unlikely(!folio)) {
count_vm_event(THP_FAULT_FALLBACK);
diff --git a/mm/ksm.c b/mm/ksm.c
index 556b8a8f37d0..e596bc1b5fa7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2971,7 +2971,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
if (!folio_test_uptodate(folio))
return folio; /* let do_swap_page report the error */
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
if (new_folio &&
mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
folio_put(new_folio);
diff --git a/mm/memory.c b/mm/memory.c
index 5e9d6a22eb08..c51bc45a7009 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1059,8 +1059,7 @@ static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
if (need_zero)
new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
else
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
- addr, false);
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
if (!new_folio)
return NULL;
@@ -4017,8 +4016,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
struct folio *folio;
swp_entry_t entry;
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
- vmf->address, false);
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
if (!folio)
return NULL;
@@ -4174,7 +4172,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
gfp = vma_thp_gfp_mask(vma);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
- folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ folio = vma_alloc_folio(gfp, order, vma, addr);
if (folio) {
if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
gfp, entry))
@@ -4713,7 +4711,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
gfp = vma_thp_gfp_mask(vma);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
- folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ folio = vma_alloc_folio(gfp, order, vma, addr);
if (folio) {
if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9e18a6fc3061..a29eff5d0585 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2290,7 +2290,6 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
* @order: Order of the folio.
* @vma: Pointer to VMA.
* @addr: Virtual address of the allocation. Must be inside @vma.
- * @hugepage: Unused (was: For hugepages try only preferred node if possible).
*
* Allocate a folio for a specific address in @vma, using the appropriate
* NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
@@ -2301,7 +2300,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
* Return: The folio on success or NULL if allocation fails.
*/
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, bool hugepage)
+ unsigned long addr)
{
struct mempolicy *pol;
pgoff_t ilx;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 48b87c62fc3d..60a0be33766f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -251,7 +251,7 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
if (!*foliop) {
ret = -ENOMEM;
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
- dst_addr, false);
+ dst_addr);
if (!folio)
goto out;