| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-29 13:04:15 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-29 13:04:15 -0700 |
| commit | 651a00bc56403161351090a9d7ddbd7095975324 | |
| tree | 921e3e06b419384d76b8ebd7c08b610dbe0d6b7b /mm/slab_common.c | |
| parent | 9d6b14cd1e993d2ff98df0cef6d935ce6fd4dbec | |
| parent | 3d053e8060430b86bad0854b7c7f03f15be3a7e5 | |
Merge tag 'slab-for-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
"This happens to be a small one (due to summer I guess), and all
hardening related:
 - Randomized kmalloc caches, by GONG, Ruiqi.

   A new opt-in hardening feature to make heap spraying harder. It
   creates multiple (16) copies of the kmalloc caches, reducing the
   chance that an attacker-controllable allocation site lands in the
   same slab as, e.g., an allocation site with a use-after-free
   vulnerability. The copy is selected from the allocation site
   address combined with a per-boot random seed (see the selection
   sketch right after this quoted message).
 - Stronger typing for hardened freelists in SLUB, by Jann Horn.

   Introduces a custom type for hardened freelist entries instead of
   "void *", as those entries are not directly dereferenceable (a
   minimal illustration of the idea follows the patch list below).
   While reviewing this, I've noticed opportunities for further
   cleanups in that code and added those on top"
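
The copy selection described in the first item (hashing the allocation call-site address together with a per-boot seed down to one of the 16 cache copies) can be sketched in plain userspace C. This is a minimal sketch, not the kernel's implementation: the hash constant, the `pick_cache_copy()` helper, and seeding via `rand()` are assumptions made for illustration; in the patch itself the selection sits behind `kmalloc_type(flags, caller)` and `random_kmalloc_seed`, as the diff further down shows.

```c
/*
 * Minimal userspace sketch of per-call-site cache selection: one of 16
 * copies is picked by hashing the caller address with a per-boot seed.
 * The hash constant and helper names are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NR_KMALLOC_COPIES 16	/* the patch creates 16 copies per size */

static uint64_t random_kmalloc_seed;	/* fixed once per "boot" */

/* Fold (caller ^ seed) down to 4 bits with a multiplicative hash. */
static unsigned int pick_cache_copy(uint64_t caller)
{
	uint64_t h = (caller ^ random_kmalloc_seed) * 0x61C8864680B583EBULL;

	return (unsigned int)(h >> 60);	/* top 4 bits: 0..15 */
}

int main(void)
{
	/* stand-ins for _RET_IP_ at three different allocation sites */
	uint64_t sites[] = { 0xffffffff81234560ULL,
			     0xffffffff81234890ULL,
			     0xffffffff8155aa10ULL };

	srand((unsigned int)time(NULL));
	random_kmalloc_seed = ((uint64_t)rand() << 32) | (uint64_t)rand();

	for (int i = 0; i < 3; i++)
		printf("call site %#llx -> cache copy %u of %d\n",
		       (unsigned long long)sites[i], pick_cache_copy(sites[i]),
		       NR_KMALLOC_COPIES);
	return 0;
}
```

Because the seed is regenerated every boot and the caller address differs per allocation site, an attacker cannot predict which of the 16 copies a given allocation primitive will use, which is the hardening effect described above.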
* tag 'slab-for-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
Randomized slab caches for kmalloc()
mm/slub: remove freelist_dereference()
mm/slub: remove redundant kasan_reset_tag() from freelist_ptr calculations
mm/slub: refactor freelist to use custom type
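
The three mm/slub patches at the end of the list change code that is not part of this page's diff (it is limited to mm/slab_common.c), so the following is only a hedged, self-contained illustration of the typing idea: an encoded freelist entry gets its own single-member struct type, so it can no longer be dereferenced or silently passed where a real pointer is expected. The struct name, the fake_cache type, and the simple XOR encoding below are assumptions for the sketch, not the kernel's definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Opaque wrapper: holds an *encoded* freelist entry, not a usable pointer. */
typedef struct { uintptr_t v; } freeptr_t;

struct fake_cache {
	uintptr_t random;	/* per-cache secret chosen at cache creation */
};

/* Encode an object pointer into its obfuscated on-slab representation. */
static freeptr_t freelist_ptr_encode(const struct fake_cache *s, void *ptr)
{
	return (freeptr_t){ .v = (uintptr_t)ptr ^ s->random };
}

/* Decode the on-slab representation back into a real pointer. */
static void *freelist_ptr_decode(const struct fake_cache *s, freeptr_t fp)
{
	return (void *)(fp.v ^ s->random);
}

int main(void)
{
	struct fake_cache cache = { .random = (uintptr_t)0x5a5aa5a5UL };
	int object = 42;

	freeptr_t fp = freelist_ptr_encode(&cache, &object);

	printf("encoded=%#lx decoded=%p original=%p\n",
	       (unsigned long)fp.v, freelist_ptr_decode(&cache, fp),
	       (void *)&object);

	/*
	 * void *p = fp;  -- rejected by the compiler: freeptr_t is not a
	 * pointer type, so an encoded value cannot be passed or assigned
	 * where a real pointer is expected without decoding it first.
	 */
	return 0;
}
```

The point, as the pull message notes, is that the words stored in a hardened freelist are not valid pointers; giving them a distinct type turns any attempt to use one without decoding it into a compile error.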
Diffstat (limited to 'mm/slab_common.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/slab_common.c | 49 |

1 file changed, 44 insertions(+), 5 deletions(-)
```diff
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d1555ea2981a..01cdbf122463 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -678,6 +678,11 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
 { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
 EXPORT_SYMBOL(kmalloc_caches);
 
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+unsigned long random_kmalloc_seed __ro_after_init;
+EXPORT_SYMBOL(random_kmalloc_seed);
+#endif
+
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -720,7 +725,7 @@ static inline unsigned int size_index_elem(unsigned int bytes)
  * Find the kmem_cache structure that serves a given size of
  * allocation
  */
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
 {
 	unsigned int index;
 
@@ -735,7 +740,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 		index = fls(size - 1);
 	}
 
-	return kmalloc_caches[kmalloc_type(flags)][index];
+	return kmalloc_caches[kmalloc_type(flags, caller)][index];
 }
 
 size_t kmalloc_size_roundup(size_t size)
@@ -752,8 +757,11 @@ size_t kmalloc_size_roundup(size_t size)
 	if (size > KMALLOC_MAX_CACHE_SIZE)
 		return PAGE_SIZE << get_order(size);
 
-	/* The flags don't matter since size_index is common to all. */
-	c = kmalloc_slab(size, GFP_KERNEL);
+	/*
+	 * The flags don't matter since size_index is common to all.
+	 * Neither does the caller for just getting ->object_size.
+	 */
+	c = kmalloc_slab(size, GFP_KERNEL, 0);
 	return c ? c->object_size : 0;
 }
 EXPORT_SYMBOL(kmalloc_size_roundup);
@@ -776,12 +784,35 @@ EXPORT_SYMBOL(kmalloc_size_roundup);
 #define KMALLOC_RCL_NAME(sz)
 #endif
 
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
+#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
+#define KMA_RAND_1(sz)                  .name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
+#define KMA_RAND_2(sz)  KMA_RAND_1(sz)  .name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
+#define KMA_RAND_3(sz)  KMA_RAND_2(sz)  .name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
+#define KMA_RAND_4(sz)  KMA_RAND_3(sz)  .name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
+#define KMA_RAND_5(sz)  KMA_RAND_4(sz)  .name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
+#define KMA_RAND_6(sz)  KMA_RAND_5(sz)  .name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
+#define KMA_RAND_7(sz)  KMA_RAND_6(sz)  .name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
+#define KMA_RAND_8(sz)  KMA_RAND_7(sz)  .name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
+#define KMA_RAND_9(sz)  KMA_RAND_8(sz)  .name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
+#define KMA_RAND_10(sz) KMA_RAND_9(sz)  .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
+#define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
+#define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
+#define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
+#define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
+#define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
+#else // CONFIG_RANDOM_KMALLOC_CACHES
+#define KMALLOC_RANDOM_NAME(N, sz)
+#endif
+
 #define INIT_KMALLOC_INFO(__size, __short_size)			\
 {								\
 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
 	KMALLOC_RCL_NAME(__short_size)				\
 	KMALLOC_CGROUP_NAME(__short_size)			\
 	KMALLOC_DMA_NAME(__short_size)				\
+	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
 	.size = __size,						\
 }
 
@@ -890,6 +921,11 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 		flags |= SLAB_CACHE_DMA;
 	}
 
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
+		flags |= SLAB_NO_MERGE;
+#endif
+
 	/*
 	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
 	 * KMALLOC_NORMAL caches.
@@ -941,6 +977,9 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 				new_kmalloc_cache(2, type, flags);
 		}
 	}
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+	random_kmalloc_seed = get_random_u64();
+#endif
 
 	/* Kmalloc array is now usable */
 	slab_state = UP;
@@ -976,7 +1015,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 		return ret;
 	}
 
-	s = kmalloc_slab(size, flags);
+	s = kmalloc_slab(size, flags, caller);
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
```
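
The chained KMA_RAND_N macros in the last large hunk act as a preprocessor loop: each level expands the previous one and appends its own designated initializer, so a single KMALLOC_RANDOM_NAME(N, sz) emits the names of all N extra cache copies at once. Below is a simplified, compilable userspace re-creation with only three levels; the RANDOM_START value, the names[] table, and the shortened macro names are assumptions made for the demonstration.

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's enum offsets; illustrative only. */
#define RANDOM_START 0

#define __CONCAT_NAME(a, b) a ## b
#define RANDOM_NAME(N, sz) __CONCAT_NAME(KMA_RAND_, N)(sz)
/* Each level expands the previous one, then adds its own initializer. */
#define KMA_RAND_1(sz)                [RANDOM_START + 1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz) KMA_RAND_1(sz) [RANDOM_START + 2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz) KMA_RAND_2(sz) [RANDOM_START + 3] = "kmalloc-rnd-03-" #sz,

/* RANDOM_NAME(3, 64) expands to the three "kmalloc-rnd-NN-64" entries. */
static const char *names[8] = {
	[0] = "kmalloc-64",	/* the ordinary, non-randomized copy */
	RANDOM_NAME(3, 64)
};

int main(void)
{
	for (int i = 0; i < 8; i++)
		if (names[i])
			printf("names[%d] = %s\n", i, names[i]);
	return 0;
}
```

The extra __CONCAT_NAME indirection mirrors __KMALLOC_RANDOM_CONCAT in the patch: it lets the N argument (RANDOM_KMALLOC_CACHES_NR in the kernel) be macro-expanded to its numeric value before the token pasting produces the KMA_RAND_N name.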
