author    Matthew Wilcox (Oracle) <willy@infradead.org>    2024-11-25 21:01:44 +0000
committer Andrew Morton <akpm@linux-foundation.org>        2025-01-13 22:40:33 -0800
commit    c972106db3550f7757c1f984ed9852d69cf1fd69 (patch)
tree      57c72be7616efbdb0c0368308807c582c16b43ed /mm/page_alloc.c
parent    a88de400e3d22035a9bf6e818808013ccccfe410 (diff)
mm/page_alloc: move set_page_refcounted() to end of __alloc_pages()
Remove some code duplication by calling set_page_refcounted() at the
end of __alloc_pages() instead of after each call that can allocate
a page.  That means that we free a frozen page if we've exceeded the
allowed memcg memory.

Link: https://lkml.kernel.org/r/20241125210149.2976098-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
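The pattern in miniature: below is a hedged, self-contained C sketch of the refactor, not kernel code. All *_sketch names and the charge_fails() predicate are hypothetical stand-ins; only the control flow mirrors the patch. The point is that the refcount is raised from 0 to 1 at a single exit point, so every failure path, including the memcg charge failure, only ever sees a frozen (refcount-zero) page.

/* sketch.c - build with: cc -o sketch sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int refcount; };

/* Allocations return a "frozen" page: refcount == 0. */
static struct page *get_page_from_freelist_sketch(void)
{
	return calloc(1, sizeof(struct page));
}

static struct page *alloc_slowpath_sketch(void)
{
	return calloc(1, sizeof(struct page));
}

/* Force the failure path, purely for demonstration. */
static bool charge_fails(struct page *page)
{
	(void)page;
	return true;
}

/* Frees a page whose refcount was never raised. */
static void free_frozen_page_sketch(struct page *page)
{
	free(page);
}

static void set_page_refcounted_sketch(struct page *page)
{
	page->refcount = 1;	/* the only 0 -> 1 transition */
}

static struct page *alloc_page_sketch(void)
{
	struct page *page = get_page_from_freelist_sketch();

	if (!page)
		page = alloc_slowpath_sketch();

	/*
	 * No path above has raised the refcount yet, so a charge
	 * failure frees a frozen page (the patch's __free_pages ->
	 * free_frozen_pages change).
	 */
	if (page && charge_fails(page)) {
		free_frozen_page_sketch(page);
		page = NULL;
	}

	/* Single exit point: raise the refcount exactly once. */
	if (page)
		set_page_refcounted_sketch(page);
	return page;
}

int main(void)
{
	struct page *page = alloc_page_sketch();

	printf("allocation %s\n", page ? "succeeded" : "failed (charge)");
	free(page);
	return 0;
}

The win is that no failure path has to undo a refcount it did not take; freeing is always the frozen-page variant.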
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e2e5cd899abd..df5b61592792 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4750,10 +4750,8 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-	if (likely(page)) {
-		set_page_refcounted(page);
+	if (likely(page))
 		goto out;
-	}
 
 	alloc_gfp = gfp;
 	ac.spread_dirty_pages = false;
@@ -4765,15 +4763,15 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	ac.nodemask = nodemask;
 
 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
-	if (page)
-		set_page_refcounted(page);
 
 out:
 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
-		__free_pages(page, order);
+		free_frozen_pages(page, order);
 		page = NULL;
 	}
 
+	if (page)
+		set_page_refcounted(page);
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 	kmsan_alloc_page(page, order, alloc_gfp);