Diffstat (limited to 'mm')

 mm/Kconfig              | 16
 mm/kfence/kfence_test.c |  7
 mm/slab.c               | 29
 mm/slab.h               |  5
 mm/slab_common.c        | 25
 5 files changed, 39 insertions, 43 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 041f0da42f2b..956fb36015c6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -218,10 +218,17 @@ choice
 	help
 	   This option allows to select a slab allocator.
 
-config SLAB
-	bool "SLAB"
+config SLAB_DEPRECATED
+	bool "SLAB (DEPRECATED)"
 	depends on !PREEMPT_RT
 	help
+	  Deprecated and scheduled for removal in a few cycles. Replaced by
+	  SLUB.
+
+	  If you cannot migrate to SLUB, please contact linux-mm@kvack.org
+	  and the people listed in the SLAB ALLOCATOR section of MAINTAINERS
+	  file, explaining why.
+
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
 	  per cpu and per node queues.
@@ -238,6 +245,11 @@ config SLUB
 
 endchoice
 
+config SLAB
+	bool
+	default y
+	depends on SLAB_DEPRECATED
+
 config SLUB_TINY
 	bool "Configure SLUB for minimal memory footprint"
 	depends on SLUB && EXPERT
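
Note on the Kconfig hunks above: the user-visible prompt moves to SLAB_DEPRECATED, while CONFIG_SLAB becomes a hidden symbol that defaults to y whenever SLAB_DEPRECATED is set. The effect is that the many existing #ifdef CONFIG_SLAB sites keep compiling unchanged. A hedged illustration of that mechanism; the function below is hypothetical and not part of the patch:

	/* Hypothetical example: code conditional on CONFIG_SLAB is
	 * untouched by the rename, because selecting SLAB_DEPRECATED
	 * still turns the hidden CONFIG_SLAB symbol on. */
	#ifdef CONFIG_SLAB
	static inline const char *slab_allocator_name(void)
	{
		return "SLAB";	/* deprecated, see mm/Kconfig */
	}
	#else
	static inline const char *slab_allocator_name(void)
	{
		return "SLUB";
	}
	#endif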
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 6aee19a79236..9e008a336d9f 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -191,11 +191,10 @@ static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t fla
 	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);
 
 	/*
-	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
-	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
-	 * allocate via memcg, if enabled.
+	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
+	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
 	 */
-	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
+	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
 	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
 	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");
 
diff --git a/mm/slab.c b/mm/slab.c
index 30d241908739..f7dc178b9058 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2355,44 +2355,34 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Hold information during a freelist initialization */
-union freelist_init_state {
-	struct {
-		unsigned int pos;
-		unsigned int *list;
-		unsigned int count;
-	};
-	struct rnd_state rnd_state;
+struct freelist_init_state {
+	unsigned int pos;
+	unsigned int *list;
+	unsigned int count;
 };
 
 /*
  * Initialize the state based on the randomization method available.
  * return true if the pre-computed list is available, false otherwise.
  */
-static bool freelist_state_initialize(union freelist_init_state *state,
+static bool freelist_state_initialize(struct freelist_init_state *state,
 				struct kmem_cache *cachep,
 				unsigned int count)
 {
 	bool ret;
-	unsigned int rand;
-
-	/* Use best entropy available to define a random shift */
-	rand = get_random_u32();
-
-	/* Use a random state if the pre-computed list is not available */
 	if (!cachep->random_seq) {
-		prandom_seed_state(&state->rnd_state, rand);
 		ret = false;
 	} else {
 		state->list = cachep->random_seq;
 		state->count = count;
-		state->pos = rand % count;
+		state->pos = get_random_u32_below(count);
 		ret = true;
 	}
 	return ret;
 }
 
 /* Get the next entry on the list and randomize it using a random shift */
-static freelist_idx_t next_random_slot(union freelist_init_state *state)
+static freelist_idx_t next_random_slot(struct freelist_init_state *state)
 {
 	if (state->pos >= state->count)
 		state->pos = 0;
@@ -2413,7 +2403,7 @@ static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b)
 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
 {
 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
-	union freelist_init_state state;
+	struct freelist_init_state state;
 	bool precomputed;
 
 	if (count < 2)
@@ -2442,8 +2432,7 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
 
 		/* Fisher-Yates shuffle */
 		for (i = count - 1; i > 0; i--) {
-			rand = prandom_u32_state(&state.rnd_state);
-			rand %= (i + 1);
+			rand = get_random_u32_below(i + 1);
 			swap_free_obj(slab, i, rand);
 		}
 	} else {
diff --git a/mm/slab.h b/mm/slab.h
index ae41c0c50ba6..ad8f6af3a22f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -294,11 +294,11 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
 			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
-			  SLAB_ACCOUNT)
+			  SLAB_ACCOUNT | SLAB_NO_MERGE)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
-			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
+			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
 #else
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
@@ -319,6 +319,7 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
 			      SLAB_TEMPORARY | \
 			      SLAB_ACCOUNT | \
 			      SLAB_KMALLOC | \
+			      SLAB_NO_MERGE | \
 			      SLAB_NO_USER_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
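
The kfence and slab.h hunks above adopt the new SLAB_NO_MERGE flag: a caller that must keep its cache unmerged no longer needs to abuse a debugging flag such as SLAB_NOLEAKTRACE for that side effect. A minimal usage sketch follows; the cache name and geometry are illustrative, not taken from the patch:

	#include <linux/init.h>
	#include <linux/slab.h>

	static struct kmem_cache *example_cache;	/* hypothetical */

	static int __init example_cache_init(void)
	{
		/* SLAB_NO_MERGE keeps this cache distinct from otherwise
		 * compatible caches; SLAB_ACCOUNT charges allocations to
		 * the allocating memcg, mirroring the kfence test above. */
		example_cache = kmem_cache_create("example_cache", 128, 0,
						  SLAB_NO_MERGE | SLAB_ACCOUNT,
						  NULL);
		return example_cache ? 0 : -ENOMEM;
	}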
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f6fe35105774..90ecaface410 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -47,7 +47,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | kasan_never_merge())
+		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
@@ -876,17 +876,17 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 		flags |= SLAB_CACHE_DMA;
 	}
 
-	kmalloc_caches[type][idx] = create_kmalloc_cache(
-					kmalloc_info[idx].name[type],
-					kmalloc_info[idx].size, flags, 0,
-					kmalloc_info[idx].size);
-
 	/*
 	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
 	 * KMALLOC_NORMAL caches.
 	 */
 	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
-		kmalloc_caches[type][idx]->refcount = -1;
+		flags |= SLAB_NO_MERGE;
+
+	kmalloc_caches[type][idx] = create_kmalloc_cache(
+					kmalloc_info[idx].name[type],
+					kmalloc_info[idx].size, flags, 0,
+					kmalloc_info[idx].size);
 }
 
 /*
@@ -1139,7 +1139,7 @@ EXPORT_SYMBOL(kmalloc_large_node);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
-static void freelist_randomize(struct rnd_state *state, unsigned int *list,
+static void freelist_randomize(unsigned int *list,
 			       unsigned int count)
 {
 	unsigned int rand;
@@ -1150,8 +1150,7 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 
 	/* Fisher-Yates shuffle */
 	for (i = count - 1; i > 0; i--) {
-		rand = prandom_u32_state(state);
-		rand %= (i + 1);
+		rand = get_random_u32_below(i + 1);
 		swap(list[i], list[rand]);
 	}
 }
@@ -1160,7 +1159,6 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
 				    gfp_t gfp)
 {
-	struct rnd_state state;
 
 	if (count < 2 || cachep->random_seq)
 		return 0;
@@ -1169,10 +1167,7 @@ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
 	if (!cachep->random_seq)
 		return -ENOMEM;
 
-	/* Get best entropy at this stage of boot */
-	prandom_seed_state(&state, get_random_long());
-
-	freelist_randomize(&state, cachep->random_seq, count);
+	freelist_randomize(cachep->random_seq, count);
 
 	return 0;
 }
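
In new_kmalloc_cache() above, merging of KMALLOC_NORMAL caches under CONFIG_MEMCG_KMEM is now suppressed by passing SLAB_NO_MERGE into create_kmalloc_cache() rather than forcing the cache's refcount to -1 after the fact; since SLAB_NO_MERGE is now part of SLAB_NEVER_MERGE, such caches are rejected as merge candidates up front. A simplified sketch of that rejection follows — the real checks live in slab_unmergeable() and find_mergeable() in mm/slab_common.c and test more conditions (size, alignment, and so on):

	/* Simplified, illustrative merge test; not the kernel's actual
	 * helper. SLAB_NEVER_MERGE now includes SLAB_NO_MERGE. */
	static bool may_merge(struct kmem_cache *s, slab_flags_t flags)
	{
		if ((flags | s->flags) & SLAB_NEVER_MERGE)
			return false;
		if (s->ctor)
			return false;	/* constructors rule out merging */
		return true;
	}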

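Finally, the freelist-randomization hunks in mm/slab.c and mm/slab_common.c drop the locally seeded prandom state in favor of get_random_u32_below(), which draws on the kernel's main random generator and returns a uniform value in [0, ceil), avoiding both the weaker PRNG and the modulo bias of the old rand % (i + 1) pattern. A hedged sketch of the resulting Fisher-Yates shuffle over a stand-in index array:

	#include <linux/minmax.h>	/* swap() */
	#include <linux/random.h>	/* get_random_u32_below() */

	/* Illustrative only: shuffles indices the way the patched
	 * freelist_randomize()/shuffle_freelist() now do. */
	static void shuffle_indices(unsigned int *list, unsigned int count)
	{
		unsigned int i;

		if (count < 2)	/* callers in the patch also bail out early */
			return;

		for (i = count - 1; i > 0; i--) {
			/* uniform in [0, i], no modulo bias */
			unsigned int j = get_random_u32_below(i + 1);

			swap(list[i], list[j]);
		}
	}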