Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  39
1 file changed, 39 insertions(+), 0 deletions(-)
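The hunk below restructures update_and_free_pages_bulk() so that hugetlb_lock is taken once per phase for the whole list rather than once per folio. As a quick illustration of that batched-locking idea only, here is a minimal user-space sketch; the node struct, prep_node() and finish_all() are hypothetical stand-ins (a pthread mutex plays the role of hugetlb_lock) and are not kernel code.

/* Minimal sketch of the batched-locking pattern (user-space, hypothetical
 * names; a pthread mutex stands in for hugetlb_lock). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	bool dtor_set;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: per-node work that can fail, done without holding the lock
 * (stand-in for the hugetlb_vmemmap_restore() pass in the patch). */
static bool prep_node(struct node *n)
{
	(void)n;
	return true;
}

/* Phase 2: one lock acquisition covers the whole list instead of a
 * lock/unlock cycle per node (stand-in for clearing the destructors). */
static void finish_all(struct node *head)
{
	pthread_mutex_lock(&list_lock);
	for (struct node *n = head; n; n = n->next)
		n->dtor_set = false;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node c = { NULL, true }, b = { &c, true }, a = { &b, true };
	bool any_prepped = false;

	/* Phase 1: prep every node; remember whether any succeeded. */
	for (struct node *n = &a; n; n = n->next)
		if (prep_node(n))
			any_prepped = true;

	/* Phase 2: one locked pass over the whole list. */
	if (any_prepped)
		finish_all(&a);

	printf("dtor_set after batched pass: %d %d %d\n",
	       a.dtor_set, b.dtor_set, c.dtor_set);
	return 0;
}

The actual patch follows the same shape with hugetlb_lock: an unlocked vmemmap restore pass, one locked pass that clears the destructors, and a final unlocked pass that frees the folios.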
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index da6f85b7db88..b839080a2a6b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1862,7 +1862,46 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
 	struct folio *folio, *t_folio;
+	bool clear_dtor = false;
+	/*
+	 * First allocate required vmemmap (if necessary) for all folios on
+	 * the list.  If vmemmap cannot be allocated, we cannot free the folio
+	 * to the lower level allocator, so add it back as a hugetlb surplus
+	 * page.  add_hugetlb_folio() removes the page from THIS list.
+	 * Use clear_dtor to note if vmemmap was successfully allocated for
+	 * ANY page on the list.
+	 */
+	list_for_each_entry_safe(folio, t_folio, list, lru) {
+		if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+			if (hugetlb_vmemmap_restore(h, &folio->page)) {
+				spin_lock_irq(&hugetlb_lock);
+				add_hugetlb_folio(h, folio, true);
+				spin_unlock_irq(&hugetlb_lock);
+			} else
+				clear_dtor = true;
+		}
+	}
+
+	/*
+	 * If vmemmap allocation was performed on any folio above, take the
+	 * lock to clear the destructor of all folios on the list.  This
+	 * avoids the need to lock/unlock for each individual folio.
+	 * The assumption is that vmemmap allocation was performed on all or
+	 * none of the folios on the list; this is true except in VERY rare cases.
+	 */
+	if (clear_dtor) {
+		spin_lock_irq(&hugetlb_lock);
+		list_for_each_entry(folio, list, lru)
+			__clear_hugetlb_destructor(h, folio);
+		spin_unlock_irq(&hugetlb_lock);
+	}
+
+	/*
+	 * Free folios back to the low level allocators.  vmemmap and the
+	 * destructors were taken care of above, so update_and_free_hugetlb_folio()
+	 * will not need to take the hugetlb lock.
+	 */
 	list_for_each_entry_safe(folio, t_folio, list, lru) {
 		update_and_free_hugetlb_folio(h, folio, false);
 		cond_resched();