diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-01 11:46:58 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-01 11:46:58 -0700 |
| commit | 0dd1cabe8a4a568252ca70f7530c3ca10e728513 (patch) | |
| tree | ce5d35280959d797060419c68e4a092cd18552e7 /mm/slab_common.c | |
| parent | 0cec3f24a7cedc726f8790d693aaff2c535dc4ce (diff) | |
| parent | 3041808b522031dccfbd898e520109569f039860 (diff) | |
| download | linux-0dd1cabe8a4a568252ca70f7530c3ca10e728513.tar.gz linux-0dd1cabe8a4a568252ca70f7530c3ca10e728513.tar.bz2 linux-0dd1cabe8a4a568252ca70f7530c3ca10e728513.zip | |
Merge tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- An addition of 'accounted' flag to slab allocation tracepoints to
indicate memcg_kmem accounting, by Vasily
- An optimization of memcg handling in freeing paths, by Muchun
- Various smaller fixes and cleanups
* tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
mm/slab_common: move generic bulk alloc/free functions to SLOB
mm/sl[au]b: use own bulk free function when bulk alloc failed
mm: slab: optimize memcg_slab_free_hook()
mm/tracing: add 'accounted' entry into output of allocation tracepoints
tools/vm/slabinfo: Handle files in debugfs
mm/slub: Simplify __kmem_cache_alias()
mm, slab: fix bad alignments
Diffstat (limited to 'mm/slab_common.c')
| -rw-r--r-- | mm/slab_common.c | 36 |
1 file changed, 4 insertions(+), 32 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c index 77c3adf40e50..17996649cfe3 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -26,13 +26,12 @@ #include <linux/memcontrol.h> #include <linux/stackdepot.h> -#define CREATE_TRACE_POINTS -#include <trace/events/kmem.h> - #include "internal.h" - #include "slab.h" +#define CREATE_TRACE_POINTS +#include <trace/events/kmem.h> + enum slab_state slab_state; LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); @@ -105,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size) } #endif -void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) -{ - size_t i; - - for (i = 0; i < nr; i++) { - if (s) - kmem_cache_free(s, p[i]); - else - kfree(p[i]); - } -} - -int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, - void **p) -{ - size_t i; - - for (i = 0; i < nr; i++) { - void *x = p[i] = kmem_cache_alloc(s, flags); - if (!x) { - __kmem_cache_free_bulk(s, i, p); - return 0; - } - } - return i; -} - /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. @@ -959,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_order); void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) { void *ret = kmalloc_order(size, flags, order); - trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); + trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags); return ret; } EXPORT_SYMBOL(kmalloc_order_trace); |
