| field | value |
|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org>, 2023-09-05 10:56:27 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2023-09-05 10:56:27 -0700 |
| commit | 5eea5820c7340d39e56e169e1b87199391105f6b (patch) |
| tree | 807d640dc77fa1d9fb9b12cc8ba234925b177031 /mm/page_alloc.c |
| parent | 893a259caa6f08477bff2bccf1df0cdff38271ac (diff) |
| parent | e68d343d2720779362cb7160cb7f4bd24979b2b4 (diff) |
| download | linux-5eea5820c7340d39e56e169e1b87199391105f6b.tar.gz, linux-5eea5820c7340d39e56e169e1b87199391105f6b.tar.bz2, linux-5eea5820c7340d39e56e169e1b87199391105f6b.zip |
Merge tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:
- Stefan Roesch has added ksm statistics to /proc/pid/smaps (a small user-space reading sketch follows the shortlog below)
- Also a number of singleton patches, mainly cleanups and leftovers
* tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/kmemleak: move up cond_resched() call in page scanning loop
mm: page_alloc: remove stale CMA guard code
MAINTAINERS: add rmap.h to mm entry
rmap: remove anon_vma_link() nommu stub
proc/ksm: add ksm stats to /proc/pid/smaps
mm/hwpoison: rename hwp_walk* to hwpoison_walk*
mm: memory-failure: add PageOffline() check
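As context for the ksm entry above: a minimal user-space sketch for pulling the new per-VMA KSM counters out of /proc/\<pid\>/smaps. The exact field name emitted by the series is an assumption here (the sketch simply matches any smaps line mentioning KSM); the rest is plain procfs reading.

```c
/*
 * Sketch only: print KSM-related counters from a process's smaps.
 * Assumption: the new smaps output contains a line whose key mentions
 * "KSM"; we match that loosely rather than hard-coding the field name.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *pid = argc > 1 ? argv[1] : "self";
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/smaps", pid);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* One matching line is printed per mapping that has KSM pages. */
		if (strstr(line, "KSM") || strstr(line, "Ksm"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
```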
Diffstat (limited to 'mm/page_alloc.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/page_alloc.c | 21 |

1 file changed, 4 insertions(+), 17 deletions(-)
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 452459836b71..0c5be12f9336 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,12 +2641,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 	do {
 		page = NULL;
 		spin_lock_irqsave(&zone->lock, flags);
-		/*
-		 * order-0 request can reach here when the pcplist is skipped
-		 * due to non-CMA allocation context. HIGHATOMIC area is
-		 * reserved for high-order atomic allocation, so order-0
-		 * request should skip it.
-		 */
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
@@ -2780,17 +2774,10 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
-		/*
-		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
-		 * we need to skip it when CMA area isn't allowed.
-		 */
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-		    migratetype != MIGRATE_MOVABLE) {
-			page = rmqueue_pcplist(preferred_zone, zone, order,
-					migratetype, alloc_flags);
-			if (likely(page))
-				goto out;
-		}
+		page = rmqueue_pcplist(preferred_zone, zone, order,
+				       migratetype, alloc_flags);
+		if (likely(page))
+			goto out;
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
```
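For orientation only, a stand-alone sketch of the control flow the second hunk leaves behind: allocations of pcp-eligible orders now always try the per-CPU page lists first and fall back to the buddy free lists, with no CMA/migratetype guard in the fast path. Every type and helper below is a stub invented for illustration; the real definitions live in mm/page_alloc.c.

```c
/* Stubbed-out illustration of the post-patch rmqueue() fast path. */
#include <stdbool.h>
#include <stdio.h>

struct page { unsigned int order; };

/* Stub: per-CPU lists only cache small orders. */
static bool pcp_allowed_order(unsigned int order) { return order <= 3; }

/* Stub: pretend the per-CPU list always has a page available. */
static struct page *rmqueue_pcplist(unsigned int order)
{
	static struct page p;
	p.order = order;
	return &p;
}

/* Stub: slow path, taking pages from the buddy free lists. */
static struct page *rmqueue_buddy(unsigned int order)
{
	static struct page p;
	p.order = order;
	return &p;
}

static struct page *rmqueue(unsigned int order)
{
	struct page *page;

	if (pcp_allowed_order(order)) {
		/* No CONFIG_CMA / MIGRATE_MOVABLE guard here anymore. */
		page = rmqueue_pcplist(order);
		if (page)
			return page;
	}
	return rmqueue_buddy(order);
}

int main(void)
{
	printf("got an order-%u page\n", rmqueue(0)->order);
	return 0;
}
```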
