// SPDX-License-Identifier: GPL-2.0
/*
* Generic infrastructure for lifetime debugging of objects.
*
* Copyright (C) 2008, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
*/
#define pr_fmt(fmt) "ODEBUG: " fmt
#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
/* Must be power of two */
#define ODEBUG_BATCH_SIZE 16
/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE (64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL (ODEBUG_POOL_SIZE / 4)
#define ODEBUG_POOL_PERCPU_SIZE (8 * ODEBUG_BATCH_SIZE)
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
/*
* We limit the freeing of debug objects via workqueue at a maximum
* frequency of 10Hz and about 1024 objects for each freeing operation.
* So it is freeing at most 10k debug objects per second.
*/
#define ODEBUG_FREE_WORK_MAX (1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
/*
 * Hash bucket for tracked objects: the list of objects hashed to this
 * bucket and the lock protecting it.
 */
struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};
/*
 * Usage statistics for the global object pool, updated under pool_lock
 * in pcpu_refill_stats().
 */
struct pool_stats {
	unsigned int cur_used;	/* Objects handed out to per CPU pools */
	unsigned int max_used;	/* High watermark of cur_used */
	unsigned int min_fill;	/* Low watermark of pool_global.cnt */
};
/*
 * Pool of free debug objects. The object list is maintained in batches
 * of ODEBUG_BATCH_SIZE linked via debug_obj::batch_last.
 */
struct obj_pool {
	struct hlist_head objects;	/* Free objects, batch by batch */
	unsigned int cnt;		/* Fill level; lockless readers use pool_count() */
	unsigned int min_cnt;		/* Refill threshold, see pool_should_refill() */
	unsigned int max_cnt;		/* Capacity limit enforced by the batch movers */
	struct pool_stats stats;	/* Only maintained for pool_global */
} ____cacheline_aligned;
/* Per CPU object pools to avoid pool_lock contention in the fast paths */
static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt = ODEBUG_POOL_PERCPU_SIZE,
};

/* Hash of tracked objects */
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

/* Static objects used before a slab cache (obj_cache) is available */
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects the global pools (pool_global, pool_to_free) */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Global pool of free objects */
static struct obj_pool pool_global = {
	.min_cnt = ODEBUG_POOL_MIN_LEVEL,
	.max_cnt = ODEBUG_POOL_SIZE,
	.stats = {
		.min_fill = ODEBUG_POOL_SIZE,
	},
};

/* Objects queued for deferred freeing; never refilled into (unbounded) */
static struct obj_pool pool_to_free = {
	.max_cnt = UINT_MAX,
};

/* Boot time object list, used before the pools are set up */
static HLIST_HEAD(pool_boot);

/* Average object usage; presumably loadavg style — updated outside this chunk */
static unsigned long avg_usage;
/* Set while deferred freeing via debug_obj_work is in flight */
static bool obj_freeing;

/* Statistics counters; racy by design, hence __data_racy */
static int __data_racy debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
static int __data_racy debug_objects_fixups __read_mostly;
static int __data_racy debug_objects_warnings __read_mostly;
/* Master enable switch, settable via the (no_)debug_objects early params */
static bool __data_racy debug_objects_enabled __read_mostly
	= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
/* Descriptor for the boot time self test (see descr name; used outside chunk) */
static const struct debug_obj_descr *descr_test __read_mostly;
/* Slab cache backing the object pools once available */
static struct kmem_cache *obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy debug_objects_allocated;
static int __data_racy debug_objects_freed;

/* Delayed work for rate limited freeing of objects back to the slab cache */
static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

/* Enabled once obj_cache is usable; gates the cache backed fast paths */
static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
/*
 * Handler for the "debug_objects" kernel command line parameter.
 * Force enables object debugging; the parameter value is ignored.
 */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);
/*
 * Handler for the "no_debug_objects" kernel command line parameter.
 * Force disables object debugging; the parameter value is ignored.
 */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);
/* Human readable representation of the debug object states */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE] = "none",
	[ODEBUG_STATE_INIT] = "initialized",
	[ODEBUG_STATE_INACTIVE] = "inactive",
	[ODEBUG_STATE_ACTIVE] = "active",
	[ODEBUG_STATE_DESTROYED] = "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
/*
 * Lockless read of the pool fill level. READ_ONCE() pairs with the
 * WRITE_ONCE() updates done under pool_lock in the batch move functions.
 */
static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}
/*
 * Lockless check whether the pool dropped below its refill threshold.
 */
static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool->min_cnt > pool_count(pool);
}
/*
 * Lockless check whether the pool is critically low, i.e. below half of
 * its refill threshold.
 */
static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < (pool->min_cnt >> 1);
}
/*
 * Move one batch of ODEBUG_BATCH_SIZE objects from the front of @src to
 * the front of @dst. Both pools must be organized in batches, i.e. the
 * first object of each batch has batch_last pointing to the batch tail.
 *
 * Must be called with pool_lock held (callers use guard(raw_spinlock)).
 * Returns false when @dst is full or @src is empty.
 */
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	/* Tail of the first batch; the node after it starts the next batch */
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	/* WRITE_ONCE() pairs with the lockless reads in pool_count() */
	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}
/*
 * Splice one full batch from list @head onto the front of @dst.
 *
 * @head must contain exactly one batch of ODEBUG_BATCH_SIZE objects with
 * the first object's batch_last pointing to the batch tail. @head is
 * reinitialized by hlist_splice_init().
 *
 * Returns false without touching @head when @dst is full.
 * NOTE(review): callers not visible in this chunk; presumably holds pool_lock
 * like the other batch movers — confirm at call sites.
 */
static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	hlist_splice_init(head, last, &dst->objects);
	/* WRITE_ONCE() pairs with the lockless reads in pool_count() */
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}
/*
 * Detach one batch of ODEBUG_BATCH_SIZE objects from the front of @src
 * and hand it out via @head. Counterpart of pool_push_batch().
 *
 * Returns false when @src is empty.
 * NOTE(review): callers not visible in this chunk; presumably holds pool_lock
 * like the other batch movers — confirm at call sites.
 */
static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	/* WRITE_ONCE() pairs with the lockless reads in pool_count() */
	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
struct debug_obj *obj;
if (unlikely(!list->first))
return NULL;
obj = hlist_entry(list->first, typeof(*obj), node);
hlist_del(&obj->node);
return obj;
}
/*
 * Account a batch which was moved from the global pools into a per CPU
 * pool and update the watermarks. Callers hold pool_lock.
 */
static void pcpu_refill_stats(void)
{
	struct pool_stats *stats = &pool_global.stats;
	unsigned int used = stats->cur_used + ODEBUG_BATCH_SIZE;

	/* WRITE_ONCE() as cur_used may be read without pool_lock elsewhere */
	WRITE_ONCE(stats->cur_used, used);

	if (used > stats->max_used)
		stats->max_used = used;

	if (pool_global.cnt < stats->min_fill)
		stats->min_fill = pool_global.cnt;
}
/*
 * Allocate a debug object from the per CPU pool. Refills the per CPU
 * pool batch wise from pool_to_free first (to avoid pointless slab
 * frees) and only then from pool_global. Returns NULL when all pools
 * are exhausted.
 *
 * Requires interrupts disabled as it operates on the per CPU pool
 * without further protection.
 */
static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			/*
			 * If this emptied a batch try to refill from the
			 * free pool. Don't do that if this was the top-most
			 * batch as pcpu_free() expects the per CPU pool
			 * to be less than ODEBUG_POOL_PERCPU_SIZE.
			 */
			if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
				     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
				/*
				 * Don't try to allocate from the regular pool here
				 * to not exhaust it prematurely.
				 */
				if (pool_count(&pool_to_free)) {
					guard(raw_spinlock)(&pool_lock);
					pool_move_batch(pcp, &pool_to_free);
					pcpu_refill_stats();
				}
			}
			return obj;
		}

		/* Per CPU pool empty: refill a batch and retry the loop */
		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		pcpu_refill_stats();
	}
}
static void pcpu_free(struct debug_obj *obj)
{
struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
struct debug_obj *first;
lockdep_assert_irqs_disabled();
if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
obj->batch_last = &obj->node;
} else {
first = hlist_entry(
|