author		Ge Yang <yangge1116@126.com>	2025-02-19 11:46:44 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2025-03-22 12:56:58 -0700
commit		6f339c38c9320db0bd7d863bdc85f83f510ba883 (patch)
tree		468932447cb9aafcb6999f980c7ec9164243a0fe
parent		d5e3ecbd3cb3fd65f86fda7832b59187231295ad (diff)
download	linux-6f339c38c9320db0bd7d863bdc85f83f510ba883.tar.gz
		linux-6f339c38c9320db0bd7d863bdc85f83f510ba883.tar.bz2
		linux-6f339c38c9320db0bd7d863bdc85f83f510ba883.zip
mm/hugetlb: wait for hugetlb folios to be freed
[ Upstream commit 67bab13307c83fb742c2556b06cdc39dbad27f07 ]

Since the introduction of commit c77c0a8ac4c52 ("mm/hugetlb: defer freeing
of huge pages if in non-task context"), which supports deferring the
freeing of hugetlb pages, the allocation of contiguous memory through
cma_alloc() may fail probabilistically.

In the CMA allocation process, if it is found that the CMA area is occupied
by in-use hugetlb folios, these in-use hugetlb folios need to be migrated
to another location.  When there are no available hugetlb folios in the
free hugetlb pool during the migration of in-use hugetlb folios, new folios
are allocated from the buddy system.  A temporary state is set on the newly
allocated folio.  Upon completion of the hugetlb folio migration, the
temporary state is transferred from the new folios to the old folios.
Normally, when the old folios with the temporary state are freed, they are
released directly back to the buddy system.  However, due to the deferred
freeing of hugetlb pages, the PageBuddy() check fails, ultimately leading
to the failure of cma_alloc().

Here is a simplified call trace illustrating the process:

cma_alloc()
    ->__alloc_contig_migrate_range() // Migrate in-use hugetlb folios
        ->unmap_and_move_huge_page()
            ->folio_putback_hugetlb() // Free old folios
    ->test_pages_isolated()
        ->__test_page_isolated_in_pageblock()
            ->PageBuddy(page) // Check if the page is in buddy

To resolve this issue, we have implemented a function named
wait_for_freed_hugetlb_folios().  This function ensures that the hugetlb
folios are properly released back to the buddy system after their
migration is completed.  By invoking wait_for_freed_hugetlb_folios()
before calling PageBuddy(), we ensure that PageBuddy() will succeed.

Link: https://lkml.kernel.org/r/1739936804-18199-1-git-send-email-yangge1116@126.com
Fixes: c77c0a8ac4c5 ("mm/hugetlb: defer freeing of huge pages if in non-task context")
Signed-off-by: Ge Yang <yangge1116@126.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--	include/linux/hugetlb.h	5
-rw-r--r--	mm/hugetlb.c		8
-rw-r--r--	mm/page_isolation.c	10
3 files changed, 23 insertions, 0 deletions
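
For illustration only, the sketch below is a minimal user-space analogue of
the pattern this patch addresses: frees are handed off to a worker, so code
that checks the resulting state must first flush that worker, which is what
wait_for_freed_hugetlb_folios() does via flush_work(&free_hpage_work).  It
uses pthreads rather than the kernel's llist/workqueue machinery, and all
names in it (deferred_free, flush_deferred_frees, worker, page_in_buddy)
are hypothetical, not kernel APIs.

/*
 * Hypothetical user-space analogue (pthreads instead of the kernel's
 * llist/workqueue) of the deferred-free pattern.  Compile with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  pending;           /* frees queued but not yet processed      */
static bool page_in_buddy;     /* stands in for PageBuddy() becoming true */

/* Worker thread: performs the real "free" later (cf. the free_hpage work). */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (pending == 0)
		pthread_cond_wait(&cond, &lock);
	page_in_buddy = true;      /* the page finally reaches the "buddy" */
	pending = 0;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Defer a free to the worker instead of doing it inline. */
static void deferred_free(void)
{
	pthread_mutex_lock(&lock);
	pending++;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Wait until all deferred frees ran (cf. wait_for_freed_hugetlb_folios()). */
static void flush_deferred_frees(void)
{
	pthread_mutex_lock(&lock);
	while (pending > 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	deferred_free();

	/*
	 * Without this flush, checking page_in_buddy would race with the
	 * worker, just as PageBuddy() raced with the deferred hugetlb free.
	 */
	flush_deferred_frees();
	printf("page_in_buddy = %s\n", page_in_buddy ? "true" : "false");

	pthread_join(tid, NULL);
	return 0;
}

With the flush in place the check always observes the freed state, which
mirrors why PageBuddy() now succeeds in __test_page_isolated_in_pageblock().
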
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0385103dffcd..39fa83a0f478 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -681,6 +681,7 @@ struct huge_bootmem_page {
 };
 
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+void wait_for_freed_hugetlb_folios(void);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
@@ -1061,6 +1062,10 @@ static inline int isolate_or_dissolve_huge_page(struct page *page,
 	return -ENOMEM;
 }
 
+static inline void wait_for_freed_hugetlb_folios(void)
+{
+}
+
 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   int avoid_reserve)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 15ae955c7cbc..6c7607d46f97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2956,6 +2956,14 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	return ret;
 }
 
+void wait_for_freed_hugetlb_folios(void)
+{
+	if (llist_empty(&hpage_freelist))
+		return;
+
+	flush_work(&free_hpage_work);
+}
+
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 				  unsigned long addr, int avoid_reserve)
 {
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7e04047977cf..6989c5ffd474 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -612,6 +612,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	int ret;
 
 	/*
+	 * Due to the deferred freeing of hugetlb folios, the hugepage folios may
+	 * not immediately release to the buddy system. This can cause PageBuddy()
+	 * to fail in __test_page_isolated_in_pageblock(). To ensure that the
+	 * hugetlb folios are properly released back to the buddy system, we
+	 * invoke the wait_for_freed_hugetlb_folios() function to wait for the
+	 * release to complete.
+	 */
+	wait_for_freed_hugetlb_folios();
+
+	/*
 	 * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
 	 * pages are not aligned to pageblock_nr_pages.
 	 * Then we just check migratetype first.