Diffstat (limited to 'mm')
86 files changed, 4146 insertions, 2486 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index a5b4fee2e3fd..0331f1461f81 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -33,7 +33,7 @@ config ZSWAP
 	  pages that are in the process of being swapped out and attempts to
 	  compress them into a dynamically allocated RAM-based memory pool.
 	  This can result in a significant I/O reduction on swap device and,
-	  in the case where decompressing from RAM is faster that swap device
+	  in the case where decompressing from RAM is faster than swap device
 	  reads, can also improve workload performance.
 
 	  This is marked experimental because it is a new feature (as of
@@ -639,14 +639,6 @@ config BOUNCE
 	  memory available to the CPU. Enabled by default when HIGHMEM is
 	  selected, but you may say n to override this.
 
-config VIRT_TO_BUS
-	bool
-	help
-	  An architecture should select this if it implements the
-	  deprecated interface virt_to_bus(). All new architectures
-	  should probably not select this.
-
-
 config MMU_NOTIFIER
 	bool
 	select SRCU
@@ -663,7 +655,7 @@ config KSM
 	  the many instances by a single page with that content, so
 	  saving memory until one or another app needs to modify the content.
 	  Recommended for use with KVM, or with other duplicative applications.
-	  See Documentation/vm/ksm.rst for more information: KSM is inactive
+	  See Documentation/mm/ksm.rst for more information: KSM is inactive
 	  until a program has madvised that an area is MADV_MERGEABLE, and
 	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
@@ -951,9 +943,6 @@ config ARCH_HAS_CURRENT_STACK_POINTER
 	  register alias named "current_stack_pointer", this config can be
 	  selected.
 
-config ARCH_HAS_VM_GET_PAGE_PROT
-	bool
-
 config ARCH_HAS_PTE_DEVMAP
 	bool
 
diff --git a/mm/Makefile b/mm/Makefile
index 6f9ffa968a1a..9a564f836403 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -133,3 +133,4 @@ obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
 obj-$(CONFIG_IO_MAPPING) += io-mapping.o
 obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
 obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
+obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 4b8eab4b3f45..22c96fed70b5 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -228,10 +228,8 @@ static void balloon_page_putback(struct page *page)
 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 }
 
-
 /* move_to_new_page() counterpart for a ballooned page */
-static int balloon_page_migrate(struct address_space *mapping,
-		struct page *newpage, struct page *page,
+static int balloon_page_migrate(struct page *newpage, struct page *page,
 		enum migrate_mode mode)
 {
 	struct balloon_dev_info *balloon = balloon_page_device(page);
@@ -250,11 +248,11 @@ static int balloon_page_migrate(struct address_space *mapping,
 	return balloon->migratepage(balloon, newpage, page, mode);
 }
 
-const struct address_space_operations balloon_aops = {
-	.migratepage = balloon_page_migrate,
+const struct movable_operations balloon_mops = {
+	.migrate_page = balloon_page_migrate,
 	.isolate_page = balloon_page_isolate,
 	.putback_page = balloon_page_putback,
 };
-EXPORT_SYMBOL_GPL(balloon_aops);
+EXPORT_SYMBOL_GPL(balloon_mops);
 
 #endif /* CONFIG_BALLOON_COMPACTION */
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 2e7704955f4f..c3ffe253e055 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -163,7 +163,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
 static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
 {
 	struct dentry *tmp;
-	char name[16];
+	char name[CMA_MAX_NAME];
 
 	scnprintf(name, sizeof(name), "cma-%s", cma->name);
 
diff --git a/mm/compaction.c b/mm/compaction.c
index 1f89b969c12b..640fa76228dd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -110,28 +110,27 @@ static void split_map_pages(struct list_head *list)
 }
 
 #ifdef CONFIG_COMPACTION
-
-int PageMovable(struct page *page)
+bool PageMovable(struct page *page)
 {
-	struct address_space *mapping;
+	const struct movable_operations *mops;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	if (!__PageMovable(page))
-		return 0;
+		return false;
 
-	mapping = page_mapping(page);
-	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
-		return 1;
+	mops = page_movable_ops(page);
+	if (mops)
+		return true;
 
-	return 0;
+	return false;
 }
 EXPORT_SYMBOL(PageMovable);
 
-void __SetPageMovable(struct page *page, struct address_space *mapping)
+void __SetPageMovable(struct page *page, const struct movable_operations *mops)
 {
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
-	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
+	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
 }
 EXPORT_SYMBOL(__SetPageMovable);
 
@@ -139,12 +138,10 @@ void __ClearPageMovable(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	/*
-	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
-	 * flag so that VM can catch up released page by driver after isolation.
-	 * With it, VM migration doesn't try to put it back.
+	 * This page still has the type of a movable page, but it's
+	 * actually not movable any more.
 	 */
-	page->mapping = (void *)((unsigned long)page->mapping &
-				PAGE_MAPPING_MOVABLE);
+	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
 }
 EXPORT_SYMBOL(__ClearPageMovable);
 
@@ -616,6 +613,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			break;
 		set_page_private(page, order);
 
+		nr_scanned += isolated - 1;
 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
 		list_add_tail(&page->lru, freelist);
@@ -1034,7 +1032,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 			/*
 			 * Only pages without mappings or that have a
-			 * ->migratepage callback are possible to migrate
+			 * ->migrate_folio callback are possible to migrate
 			 * without blocking. However, we can be racing with
 			 * truncation so it's necessary to lock the page
 			 * to stabilise the mapping as truncation holds
@@ -1045,7 +1043,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				goto isolate_fail_put;
 
 			mapping = page_mapping(page);
-			migrate_dirty = !mapping || mapping->a_ops->migratepage;
+			migrate_dirty = !mapping ||
+					mapping->a_ops->migrate_folio;
 			unlock_page(page);
 			if (!migrate_dirty)
 				goto isolate_fail_put;
@@ -1101,6 +1100,7 @@ isolate_success:
 isolate_success_no_list:
 		cc->nr_migratepages += compound_nr(page);
 		nr_isolated += compound_nr(page);
+		nr_scanned += compound_nr(page) - 1;
 
 		/*
 		 * Avoid isolating too much unless this block is being
@@ -1504,6 +1504,7 @@ fast_isolate_freepages(struct compact_control *cc)
 			if (__isolate_free_page(page, order)) {
 				set_page_private(page, order);
 				nr_isolated = 1 << order;
+				nr_scanned += nr_isolated - 1;
 				cc->nr_freepages += nr_isolated;
 				list_add_tail(&page->lru, &cc->freepages);
 				count_compact_events(COMPACTISOLATED, nr_isolated);
@@ -3011,7 +3012,7 @@ void kcompactd_run(int nid)
 
 /*
  * Called by memory hotplug when all memory in a node is offlined. Caller must
- * hold mem_hotplug_begin/end().
+ * be holding mem_hotplug_begin/done().
  */
 void kcompactd_stop(int nid)
 {
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 9b559c76d6dd..66265e3a9c65 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -92,4 +92,12 @@ config DAMON_RECLAIM
 	  reclamation under light memory pressure, while the traditional page
 	  scanning-based reclamation is used for heavy pressure.
 
+config DAMON_LRU_SORT
+	bool "Build DAMON-based LRU-lists sorting (DAMON_LRU_SORT)"
+	depends on DAMON_PADDR
+	help
+	  This builds the DAMON-based LRU-lists sorting subsystem. It tries to
+	  protect frequently accessed (hot) pages while rarely accessed (cold)
+	  pages reclaimed first under memory pressure.
+
 endmenu
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
index dbf7190b4144..3e6b8ad73858 100644
--- a/mm/damon/Makefile
+++ b/mm/damon/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
 obj-$(CONFIG_DAMON_SYSFS) += sysfs.o
 obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
 obj-$(CONFIG_DAMON_RECLAIM) += reclaim.o
+obj-$(CONFIG_DAMON_LRU_SORT) += lru_sort.o
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index a0dab8b5e45f..cb8a7e9926a4 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -97,6 +97,31 @@ out:
 	return ret;
 }
 
+/*
+ * Return corresponding dbgfs' scheme action value (int) for the given
+ * damos_action if the given damos_action value is valid and supported by
+ * dbgfs, negative error code otherwise.
+ */
+static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
+{
+	switch (action) {
+	case DAMOS_WILLNEED:
+		return 0;
+	case DAMOS_COLD:
+		return 1;
+	case DAMOS_PAGEOUT:
+		return 2;
+	case DAMOS_HUGEPAGE:
+		return 3;
+	case DAMOS_NOHUGEPAGE:
+		return 4;
+	case DAMOS_STAT:
+		return 5;
+	default:
+		return -EINVAL;
+	}
+}
+
 static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
 {
 	struct damos *s;
@@ -109,7 +134,7 @@ static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
 			s->min_sz_region, s->max_sz_region,
 			s->min_nr_accesses, s->max_nr_accesses,
 			s->min_age_region, s->max_age_region,
-			s->action,
+			damos_action_to_dbgfs_scheme_action(s->action),
 			s->quota.ms, s->quota.sz,
 			s->quota.reset_interval,
 			s->quota.weight_sz,
@@ -160,18 +185,27 @@ static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
 	kfree(schemes);
 }
 
-static bool damos_action_valid(int action)
+/*
+ * Return corresponding damos_action for the given dbgfs input for a scheme
+ * action if the input is valid, negative error code otherwise.
+ */
+static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
 {
-	switch (action) {
-	case DAMOS_WILLNEED:
-	case DAMOS_COLD:
-	case DAMOS_PAGEOUT:
-	case DAMOS_HUGEPAGE:
-	case DAMOS_NOHUGEPAGE:
-	case DAMOS_STAT:
-		return true;
+	switch (dbgfs_action) {
+	case 0:
+		return DAMOS_WILLNEED;
+	case 1:
+		return DAMOS_COLD;
+	case 2:
+		return DAMOS_PAGEOUT;
+	case 3:
+		return DAMOS_HUGEPAGE;
+	case 4:
+		return DAMOS_NOHUGEPAGE;
+	case 5:
+		return DAMOS_STAT;
 	default:
-		return false;
+		return -EINVAL;
 	}
 }
 
@@ -189,7 +223,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
 	int pos = 0, parsed, ret;
 	unsigned long min_sz, max_sz;
 	unsigned int min_nr_a, max_nr_a, min_age, max_age;
-	unsigned int action;
+	unsigned int action_input;
+	enum damos_action action;
 
 	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
 			GFP_KERNEL);
@@ -204,7 +239,7 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
 		ret = sscanf(&str[pos],
 				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
 				&min_sz, &max_sz, &min_nr_a, &max_nr_a,
-				&min_age, &max_age, &action, &quota.ms,
+				&min_age, &max_age, &action_input, &quota.ms,
 				&quota.sz, &quota.reset_interval,
 				&quota.weight_sz, &quota.weight_nr_accesses,
 				&quota.weight_age, &wmarks.metric,
 				&wmarks.interval, &wmarks.high, &wmarks.mid,
 				&wmarks.low, &parsed);
 		if (ret != 18)
 			break;
-		if (!damos_action_valid(action))
+		action = dbgfs_scheme_action_to_damos_action(action_input);
+		if ((int)action < 0)
 			goto fail;
 
 		if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
@@ -275,11 +311,6 @@ out:
 	return ret;
 }
 
-static inline bool target_has_pid(const struct damon_ctx *ctx)
-{
-	return ctx->ops.id == DAMON_OPS_VADDR;
-}
-
 static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
 {
 	struct damon_target *t;
@@ -288,7 +319,7 @@ static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
 	int rc;
 
 	damon_for_each_target(t, ctx) {
-		if (target_has_pid(ctx))
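
For readers skimming the balloon_compaction.c and compaction.c hunks above: this series replaces the old trick of routing non-LRU page migration through a fake address_space and its a_ops with a dedicated movable_operations structure. The following is a minimal, hypothetical driver-side sketch, not code from this diff; the my_* names are invented for illustration, while struct movable_operations, __SetPageMovable(), page_movable_ops() and MIGRATEPAGE_SUCCESS are the interfaces the series itself touches:

/*
 * Hypothetical sketch of a driver adopting the movable_operations
 * interface shown in the diff above.  The ops pointer, tagged with
 * PAGE_MAPPING_MOVABLE, is stored in page->mapping by
 * __SetPageMovable(); page_movable_ops() reads it back, which is why
 * the new bool PageMovable() no longer chases mapping->a_ops.
 */
#include <linux/mm.h>
#include <linux/migrate.h>

static bool my_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Detach @page from the driver's internal tracking. */
	return true;
}

static int my_migrate_page(struct page *dst, struct page *src,
			   enum migrate_mode mode)
{
	/* Copy contents and driver metadata from @src to @dst. */
	return MIGRATEPAGE_SUCCESS;
}

static void my_putback_page(struct page *page)
{
	/* Re-attach @page after a failed or aborted migration. */
}

static const struct movable_operations my_mops = {
	.isolate_page = my_isolate_page,
	.migrate_page = my_migrate_page,
	.putback_page = my_putback_page,
};

/* Called with @page locked, as the VM_BUG_ON_PAGE() in the diff requires. */
static void my_mark_movable(struct page *page)
{
	__SetPageMovable(page, &my_mops);
}

The balloon_mops conversion in the diff is exactly this pattern: where a driver previously registered a struct address_space_operations carrying isolate_page/migratepage/putback_page, it now hands its movable_operations straight to __SetPageMovable().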
