| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-08-16 16:11:52 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-08-21 14:28:43 -0700 |
| commit | 8dc4a8f1e038189cb575f89bcd23364698b88cc1 | |
| tree | 000cc2ba54fe80c8b41ba25dc5791f55d338a137 | |
| parent | 454a00c40a21c59e99c526fe8cc57bd029cf8f0e | |
mm: convert free_transhuge_page() to folio_undo_large_rmappable()
Indirect calls are expensive, thanks to Spectre. Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately. Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.
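
In other words, destroy_large_folio() can check the folio's dtor field and call the THP teardown directly instead of dispatching through compound_page_dtors[]. A minimal userspace sketch of that pattern (hypothetical names throughout, not the kernel code itself):

```c
/*
 * Hypothetical sketch of the optimisation: replace an indirect call
 * through a destructor table with a test for the known dtor id and a
 * direct call. All names here are illustrative, not kernel code.
 */
#include <stdio.h>

enum dtor_id { PLAIN_DTOR, TRANSHUGE_DTOR, NR_DTORS };

struct obj {
	enum dtor_id dtor;
};

static void free_plain(struct obj *o)     { puts("free_plain"); }
static void free_transhuge(struct obj *o) { puts("free_transhuge"); }

/* Before: every destructor goes through the table (indirect call). */
static void (*const dtors[NR_DTORS])(struct obj *) = {
	[PLAIN_DTOR]     = free_plain,
	[TRANSHUGE_DTOR] = free_transhuge,
};

static void destroy_before(struct obj *o)
{
	dtors[o->dtor](o);		/* indirect call: retpoline cost */
}

/* After: test for the special case and call its destructor directly. */
static void destroy_after(struct obj *o)
{
	if (o->dtor == TRANSHUGE_DTOR) {
		free_transhuge(o);	/* direct call */
		return;
	}
	dtors[o->dtor](o);		/* fallback for remaining kinds */
}

int main(void)
{
	struct obj o = { .dtor = TRANSHUGE_DTOR };

	destroy_before(&o);
	destroy_after(&o);
	return 0;
}
```

Under retpolines, the table dispatch in destroy_before() costs a mispredicted indirect branch; the explicit test in destroy_after() compiles to a compare plus a direct call for the common case.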
Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| -rw-r--r-- | include/linux/huge_mm.h | 2 |
| -rw-r--r-- | include/linux/mm.h | 2 |
| -rw-r--r-- | mm/huge_memory.c | 22 |
| -rw-r--r-- | mm/internal.h | 2 |
| -rw-r--r-- | mm/page_alloc.c | 9 |
5 files changed, 19 insertions, 18 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e718dbe928ba..ceda26a20830 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -141,8 +141,6 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
 void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 55eb2789794e..0d14e2045658 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,9 +1253,7 @@ enum compound_dtor_id {
 #ifdef CONFIG_HUGETLB_PAGE
 	HUGETLB_PAGE_DTOR,
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	TRANSHUGE_PAGE_DTOR,
-#endif
 	NR_COMPOUND_DTORS,
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 154c210892a1..b33456683b93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2776,10 +2776,9 @@ out:
 	return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
 	 * deferred_list. If folio is not in deferred_list, it's safe
 	 * to check without acquiring the split_queue_lock.
 	 */
-	if (data_race(!list_empty(&folio->_deferred_list))) {
-		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-		if (!list_empty(&folio->_deferred_list)) {
-			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
-		}
-		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	ds_queue = get_deferred_split_queue(folio);
+	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	if (!list_empty(&folio->_deferred_list)) {
+		ds_queue->split_queue_len--;
+		list_del(&folio->_deferred_list);
 	}
-	free_compound_page(page);
+	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)
diff --git a/mm/internal.h b/mm/internal.h
index d99ffb473f90..30bbfcacc909 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,6 +413,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
+void folio_undo_large_rmappable(struct folio *folio);
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30dc444436cc..4047b5897443 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 	[NULL_COMPOUND_DTOR] = NULL,
 	[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
 };
 
 int min_free_kbytes = 1024;
@@ -614,6 +611,12 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
+	if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR) {
+		folio_undo_large_rmappable(folio);
+		free_compound_page(&folio->page);
+		return;
+	}
+
 	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
 	compound_page_dtors[dtor](&folio->page);
 }
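
One detail worth noting in the new folio_undo_large_rmappable(): the lockless data_race(list_empty()) check is inverted into an early return, so the common case of a folio that was never deferred takes neither the split-queue lookup nor the spinlock. A rough userspace analogue of that double-checked removal (hypothetical sketch, with a pthread mutex standing in for the kernel spinlock):

```c
/*
 * Userspace analogue of the double-checked list removal above:
 * peek without the lock (cheap), then re-check under the lock
 * before unlinking. Illustrative names only, not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* self-linked when not queued */
};

static pthread_mutex_t split_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static bool on_list(const struct node *n)
{
	return n->next != n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;
}

static void undo_deferred(struct node *n)
{
	/* Lockless fast path: most nodes were never queued. */
	if (!on_list(n))
		return;

	/* Slow path: re-check under the lock before unlinking. */
	pthread_mutex_lock(&split_queue_lock);
	if (on_list(n))
		list_del_init(n);
	pthread_mutex_unlock(&split_queue_lock);
}

int main(void)
{
	struct node n = { &n, &n };	/* never queued */

	undo_deferred(&n);		/* returns without taking the lock */
	printf("queued: %d\n", on_list(&n));
	return 0;
}
```

The unlocked peek is safe in the kernel case because a folio that is not on the deferred list stays self-linked and nothing can add it at this point in its teardown; the re-check under the lock handles folios that are still queued.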
