path: root/kernel/lockdep.c
author      Peter Zijlstra <peterz@infradead.org>    2013-10-31 18:14:17 +0100
committer   Ingo Molnar <mingo@kernel.org>           2013-11-06 07:55:08 +0100
commit      8eddac3f103736163f49255bcb109edadea167f6 (patch)
tree        cd3161b76bb7a7e2614817d0ba66446676e6b677 /kernel/lockdep.c
parent      01768b42dc97a67b4fb33a2535c49fc1969880df (diff)
locking: Move the lockdep code to kernel/locking/
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-wl7s3tta5isufzfguc23et06@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--   kernel/lockdep.c   4257
1 file changed, 0 insertions, 4257 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
deleted file mode 100644
index 4e8e14c34e42..000000000000
--- a/kernel/lockdep.c
+++ /dev/null
@@ -1,4257 +0,0 @@
-/*
- * kernel/lockdep.c
- *
- * Runtime locking correctness validator
- *
- * Started by Ingo Molnar:
- *
- * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- *
- * this code maps all the lock dependencies as they occur in a live kernel
- * and will warn about the following classes of locking bugs:
- *
- * - lock inversion scenarios
- * - circular lock dependencies
- * - hardirq/softirq safe/unsafe locking bugs
- *
- * Bugs are reported even if the current locking scenario does not cause
- * any deadlock at this point.
- *
- * I.e. if at any time in the past two locks were taken in a different order,
- * even if it happened for another task, even if those were different
- * locks (but of the same class as this lock), this code will detect it.
- *
- * Thanks to Arjan van de Ven for coming up with the initial idea of
- * mapping lock dependencies at runtime.
- */
-#define DISABLE_BRANCH_PROFILING
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/stacktrace.h>
-#include <linux/debug_locks.h>
-#include <linux/irqflags.h>
-#include <linux/utsname.h>
-#include <linux/hash.h>
-#include <linux/ftrace.h>
-#include <linux/stringify.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-#include <linux/kmemcheck.h>
-
-#include <asm/sections.h>
-
-#include "lockdep_internals.h"
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/lock.h>
-
-#ifdef CONFIG_PROVE_LOCKING
-int prove_locking = 1;
-module_param(prove_locking, int, 0644);
-#else
-#define prove_locking 0
-#endif
-
-#ifdef CONFIG_LOCK_STAT
-int lock_stat = 1;
-module_param(lock_stat, int, 0644);
-#else
-#define lock_stat 0
-#endif
-
-/*
- * lockdep_lock: protects the lockdep graph, the hashes and the
- * class/list/hash allocators.
- *
- * This is one of the rare exceptions where it's justified
- * to use a raw spinlock - we really don't want the spinlock
- * code to recurse back into the lockdep code...
- */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
-static int graph_lock(void)
-{
- arch_spin_lock(&lockdep_lock);
- /*
- * Make sure that if another CPU detected a bug while
- * walking the graph we don't change it (while the other
- * CPU is busy printing out stuff with the graph lock
- * dropped already)
- */
- if (!debug_locks) {
- arch_spin_unlock(&lockdep_lock);
- return 0;
- }
- /* prevent any recursions within lockdep from causing deadlocks */
- current->lockdep_recursion++;
- return 1;
-}
-
-static inline int graph_unlock(void)
-{
- if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
- /*
- * The lockdep graph lock isn't locked while we expect it to
- * be, we're confused now, bye!
- */
- return DEBUG_LOCKS_WARN_ON(1);
- }
-
- current->lockdep_recursion--;
- arch_spin_unlock(&lockdep_lock);
- return 0;
-}
-
-/*
- * Turn lock debugging off and return with 0 if it was off already,
- * and also release the graph lock:
- */
-static inline int debug_locks_off_graph_unlock(void)
-{
- int ret = debug_locks_off();
-
- arch_spin_unlock(&lockdep_lock);
-
- return ret;
-}
-
-static int lockdep_initialized;
-
-unsigned long nr_list_entries;
-static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-
-/*
- * All data structures here are protected by the global debug_lock.
- *
- * Mutex key structs only get allocated once, during bootup, and never
- * get freed - this significantly simplifies the debugging code.
- */
-unsigned long nr_lock_classes;
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
-
-static inline struct lock_class *hlock_class(struct held_lock *hlock)
-{
- if (!hlock->class_idx) {
- /*
- * Someone passed in garbage, we give up.
- */
- DEBUG_LOCKS_WARN_ON(1);
- return NULL;
- }
- return lock_classes + hlock->class_idx - 1;
-}
-
-#ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
- cpu_lock_stats);
-
-static inline u64 lockstat_clock(void)
-{
- return local_clock();
-}
-
-static int lock_point(unsigned long points[], unsigned long ip)
-{
- int i;
-
- for (i = 0; i < LOCKSTAT_POINTS; i++) {
- if (points[i] == 0) {
- points[i] = ip;
- break;
- }
- if (points[i] == ip)
- break;
- }
-
- return i;
-}
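
/*
 * Illustration, not part of the original file: lock_point() hands back a
 * stable slot per unique caller ip, or LOCKSTAT_POINTS once the table is
 * full. A sketch of how per-callsite counters can then be kept; the
 * example_* names are hypothetical.
 */
static unsigned long example_points[LOCKSTAT_POINTS];
static unsigned long example_counts[LOCKSTAT_POINTS + 1];

static void example_count_contention(unsigned long ip)
{
	int slot = lock_point(example_points, ip);

	/* slot == LOCKSTAT_POINTS: table full and ip not found; overflow bucket */
	example_counts[slot]++;
}
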
-
-static void lock_time_inc(struct lock_time *lt, u64 time)
-{
- if (time > lt->max)
- lt->max = time;
-
- if (time < lt->min || !lt->nr)
- lt->min = time;
-
- lt->total += time;
- lt->nr++;
-}
-
-static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
-{
- if (!src->nr)
- return;
-
- if (src->max > dst->max)
- dst->max = src->max;
-
- if (src->min < dst->min || !dst->nr)
- dst->min = src->min;
-
- dst->total += src->total;
- dst->nr += src->nr;
-}
-
-struct lock_class_stats lock_stats(struct lock_class *class)
-{
- struct lock_class_stats stats;
- int cpu, i;
-
- memset(&stats, 0, sizeof(struct lock_class_stats));
- for_each_possible_cpu(cpu) {
- struct lock_class_stats *pcs =
- &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
-
- for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
- stats.contention_point[i] += pcs->contention_point[i];
-
- for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
- stats.contending_point[i] += pcs->contending_point[i];
-
- lock_time_add(&pcs->read_waittime, &stats.read_waittime);
- lock_time_add(&pcs->write_waittime, &stats.write_waittime);
-
- lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
- lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
-
- for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
- stats.bounces[i] += pcs->bounces[i];
- }
-
- return stats;
-}
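
/*
 * Illustration, not part of the original file: the same per-CPU
 * aggregation pattern as lock_stats() above, reduced to one counter.
 * Hot paths bump a CPU-local variable; readers fold all CPUs together.
 * A kernel-style sketch with hypothetical names (example_hits etc.),
 * assuming <linux/percpu.h> and <linux/cpumask.h>.
 */
static DEFINE_PER_CPU(unsigned long, example_hits);

static inline void example_hit(void)
{
	this_cpu_inc(example_hits);	/* no shared cacheline on the hot path */
}

static unsigned long example_total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	/* slow path: fold the per-CPU counters at read time */
	for_each_possible_cpu(cpu)
		sum += per_cpu(example_hits, cpu);

	return sum;
}
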
-
-void clear_lock_stats(struct lock_class *class)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct lock_class_stats *cpu_stats =
- &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
-
- memset(cpu_stats, 0, sizeof(struct lock_class_stats));
- }
- memset(class->contention_point, 0, sizeof(class->contention_point));
- memset(class->contending_point, 0, sizeof(class->contending_point));
-}
-
-static struct lock_class_stats *get_lock_stats(struct lock_class *class)
-{
- return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
-}
-
-static void put_lock_stats(struct lock_class_stats *stats)
-{
- put_cpu_var(cpu_lock_stats);
-}
-
-static void lock_release_holdtime(struct held_lock *hlock)
-{
- struct lock_class_stats *stats;
- u64 holdtime;
-
- if (!lock_stat)
- return;
-
- holdtime = lockstat_clock() - hlock->holdtime_stamp;
-
- stats = get_lock_stats(hlock_class(hlock));
- if (hlock->read)
- lock_time_inc(&stats->read_holdtime, holdtime);
- else
- lock_time_inc(&stats->write_holdtime, holdtime);
- put_lock_stats(stats);
-}
-#else
-static inline void lock_release_holdtime(struct held_lock *hlock)
-{
-}
-#endif
-
-/*
- * We keep a global list of all lock classes. The list only grows,
- * never shrinks. The list is only accessed with the lockdep
- * spinlock held.
- */
-LIST_HEAD(all_lock_classes);
-
-/*
- * The lockdep classes are in a hash-table as well, for fast lookup:
- */
-#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
-#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
-#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
-#define classhashentry(key) (classhash_table + __classhashfn((key)))
-
-static struct list_head classhash_table[CLASSHASH_SIZE];
-
-/*
- * We put the lock dependency chains into a hash-table as well, to cache
- * their existence:
- */
-#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
-#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
-#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
-#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
-
-static struct list_head chainhash_table[CHAINHASH_SIZE];
-
-/*
- * The hash key of the lock dependency chains is a hash itself too:
- * it's a hash of all locks taken up to that lock, including that lock.
- * It's a 64-bit hash, because it's important for the keys to be
- * unique.
- */
-#define iterate_chain_key(key1, key2) \
- (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
- ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
- (key2))
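
/*
 * Illustration, not part of the original file: a chain key is built by
 * folding one class id per held lock into the running hash with
 * iterate_chain_key(). example_chain_key_of() is a hypothetical helper;
 * the real code folds the ids one at a time as locks are acquired in
 * __lock_acquire().
 */
static u64 example_chain_key_of(const unsigned int *class_ids, int n)
{
	u64 chain_key = 0;	/* the empty chain hashes to 0 */
	int i;

	for (i = 0; i < n; i++)
		chain_key = iterate_chain_key(chain_key, class_ids[i]);

	return chain_key;
}
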
-
-void lockdep_off(void)
-{
- current->lockdep_recursion++;
-}
-EXPORT_SYMBOL(lockdep_off);
-
-void lockdep_on(void)
-{
- current->lockdep_recursion--;
-}
-EXPORT_SYMBOL(lockdep_on);
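
/*
 * Illustration, not part of the original file: lockdep_off()/lockdep_on()
 * bracket a region in which lockdep ignores the current task's locking,
 * e.g. a debug path known to confuse the validator. The calls must stay
 * balanced. example_unvalidated_work()/example_unvalidated_section() are
 * hypothetical.
 */
static void example_unvalidated_work(void)
{
	/* work that takes locks lockdep should not track for this task */
}

static void example_unvalidated_section(void)
{
	lockdep_off();			/* current->lockdep_recursion++ */
	example_unvalidated_work();
	lockdep_on();			/* current->lockdep_recursion-- */
}
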
-
-/*
- * Debugging switches:
- */
-
-#define VERBOSE 0
-#define VERY_VERBOSE 0
-
-#if VERBOSE
-# define HARDIRQ_VERBOSE 1
-# define SOFTIRQ_VERBOSE 1
-# define RECLAIM_VERBOSE 1
-#else
-# define HARDIRQ_VERBOSE 0
-# define SOFTIRQ_VERBOSE 0
-# define RECLAIM_VERBOSE 0
-#endif
-
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
-/*
- * Quick filtering for interesting events:
- */
-static int class_filter(struct lock_class *class)
-{
-#if 0
- /* Example */
- if (class->name_version == 1 &&
- !strcmp(class->name, "lockname"))
- return 1;
- if (class->name_version == 1 &&
- !strcmp(class->name, "&struct->lockfield"))
- return 1;
-#endif
- /* Filter everything else. Returning 1 here would allow everything else. */
- return 0;
-}
-#endif
-
-static int verbose(struct lock_class *class)
-{
-#if VERBOSE
- return class_filter(class);
-#endif
- return 0;
-}
-
-/*
- * Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the graph_lock.
- */
-unsigned long nr_stack_trace_entries;
-static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
-
-static void print_lockdep_off(const char *bug_msg)
-{
- printk(KERN_DEBUG "%s\n", bug_msg);
- printk(KERN_DEBUG "turning off the locking correctness validator.\n");
- printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
-}
-
-static int save_trace(struct stack_trace *trace)
-{
- trace->nr_entries = 0;
- trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
- trace->entries = stack_trace + nr_stack_trace_entries;
-
- trace->skip = 3;
-
- save_stack_trace(trace);
-
- /*
- * Some daft arches put -1 at the end to indicate it's a full trace.
- *
- * <rant> this is buggy anyway, since it takes a whole extra entry so a
- * complete trace that maxes out the entries provided will be reported
- * as incomplete, friggin useless </rant>
- */
- if (trace->nr_entries != 0 &&
- trace->entries[trace->nr_entries-1] == ULONG_MAX)
- trace->nr_entries--;
-
- trace->max_entries = trace->nr_entries;
-
- nr_stack_trace_entries += trace->nr_entries;
-
- if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
- if (!debug_locks_off_graph_unlock())
- return 0;
-
- print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
- dump_stack();
-
- return 0;
- }
-
- return 1;
-}
-
-unsigned int nr_hardirq_chains;
-unsigned int nr_softirq_chains;
-unsigned int nr_process_chains;
-unsigned int max_lockdep_depth;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-/*
- * We cannot printk in early bootup code. Even early_printk()
- * might not work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
- .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
- .entries = lockdep_init_trace_data,
-};
-
-/*
- * Various lockdep statistics:
- */
-DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
-#endif
-
-/*
- * Locking printouts:
- */
-
-#define __USAGE(__STATE) \
- [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
- [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
- [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
- [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
-
-static const char *usage_str[] =
-{
-#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
- [LOCK_USED] = "INITIAL USE",
-};
-
-const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
-{
- return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
-}
-
-static inline unsigned long lock_flag(enum lock_usage_bit bit)
-{
- return 1UL << bit;
-}
-
-static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
-{
- char c = '.';
-
- if (class->usage_mask & lock_flag(bit + 2))
- c = '+';
- if (class->usage_mask & lock_flag(bit)) {
- c = '-';
- if (class->usage_mask & lock_flag(bit + 2))
- c = '?';
- }
-
- return c;
-}
-
-void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
-{
- int i = 0;
-
-#define LOCKDEP_STATE(__STATE) \
- usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
- usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
-
- usage[i] = '\0';
-}
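
/*
 * Illustration, not part of the original file: decoding one character of
 * the usage string built above, mirroring get_usage_char(). '+' means the
 * class was acquired with that irq type enabled, '-' means it was acquired
 * in that irq context, '?' means both, '.' neither.
 * example_usage_char_meaning() is a hypothetical helper.
 */
static const char *example_usage_char_meaning(char c)
{
	switch (c) {
	case '+':
		return "acquired with this irq type enabled";
	case '-':
		return "acquired in this irq context";
	case '?':
		return "both of the above";
	default:
		return "neither";	/* '.' */
	}
}
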
-
-static void __print_lock_name(struct lock_class *class)
-{
- char str[KSYM_NAME_LEN];
- const char *name;
-
- name = class->name;
- if (!name) {
- name = __get_key_name(class->key, str);
- printk("%s", name);
- } else {
- printk("%s", name);
- if (class->name_version > 1)
- printk("#%d", class->name_version);
- if (class->subclass)
- printk("/%d", class->subclass);
- }
-}
-
-static void print_lock_name(struct lock_class *class)
-{
- char usage[LOCK_USAGE_CHARS];
-
- get_usage_chars(class, usage);
-
- printk(" (");
- __print_lock_name(class);
- printk("){%s}", usage);
-}
-
-static void print_lockdep_cache(struct lockdep_map *lock)
-{
- const char *name;
- char str[KSYM_NAME_LEN];
-
- name = lock->name;
- if (!name)
- name = __get_key_name(lock->key->subkeys, str);
-
- printk("%s", name);
-}
-
-static void print_lock(struct held_lock *hlock)
-{
- print_lock_name(hlock_class(hlock));
- printk(", at: ");
- print_ip_sym(hlock->acquire_ip);
-}
-
-static void lockdep_print_held_locks(struct task_struct *curr)
-{
- int i, depth = curr->lockdep_depth;
-
- if (!depth) {
- printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
- return;
- }
- printk("%d lock%s held by %s/%d:\n",
- depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
-
- for (i = 0; i < depth; i++) {
- printk(" #%d: ", i);
- print_lock(curr->held_locks + i);
- }
-}
-
-static void print_kernel_ident(void)
-{
- printk("%s %.*s %s\n", init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version,
- print_tainted());
-}
-
-static int very_verbose(struct lock_class *class)
-{
-#if VERY_VERBOSE
- return class_filter(class);
-#endif
- return 0;
-}
-
-/*
- * Is this the address of a static object:
- */
-static int static_obj(void *obj)
-{
- unsigned long start = (unsigned long) &_stext,
- end = (unsigned long) &_end,
- addr = (unsigned long) obj;
-
- /*
- * static variable?
- */
- if ((addr >= start) && (addr < end))
- return 1;
-
- if (arch_is_kernel_data(addr))
- return 1;
-
- /*
- * in-kernel percpu var?
- */
- if (is_kernel_percpu_address(addr))
- return 1;
-
- /*
- * module static or percpu var?
- */
- return is_module_address(addr) || is_module_percpu_address(addr);
-}
-
-/*
- * To make lock name printouts unique, we calculate a unique
- * class->name_version generation counter:
- */
-static int count_matching_names(struct lock_class *new_class)
-{
- struct lock_class *class;
- int count = 0;
-
- if (!new_class->name)
- return 0;
-
- list_for_each_entry(class, &all_lock_classes, lock_entry) {
- if (new_class->key - new_class->subclass == class->key)
- return class->name_version;
- if (class->name && !strcmp(class->name, new_class->name))
- count = max(count, class->name_version);
- }
-
- return count + 1;
-}
-
-/*
- * Look up a lock's class in the hash-table. Returns the class if it has
- * already been registered, NULL otherwise; register_lock_class() below
- * handles registration and caches the result in the lock object itself.
- */
-static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
-{
- struct lockdep_subclass_key *key;
- struct list_head *hash_head;
- struct lock_class *class;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
- /*
- * If the architecture calls into lockdep before initializing
- * the hashes then we'll warn about it later. (we cannot printk
- * right now)
- */
- if (unlikely(!lockdep_initialized)) {
- lockdep_init();
- lockdep_init_error = 1;
- lock_init_error = lock->name;
- save_stack_trace(&lockdep_init_trace);
- }
-#endif
-
- if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
- debug_locks_off();
- printk(KERN_ERR
- "BUG: looking up invalid subclass: %u\n", subclass);
- printk(KERN_ERR
- "turning off the locking correctness validator.\n");
- dump_stack();
- return NULL;
- }
-
- /*
- * Static locks do not have their class-keys yet - for them the key
- * is the lock object itself:
- */
- if (unlikely(!lock->key))
- lock->key = (void *)lock;
-
- /*
- * NOTE: the class-key must be unique. For dynamic locks, a static
- * lock_class_key variable is passed in through the mutex_init()
- * (or spin_lock_init()) call - which acts as the key. For static
- * locks we use the lock object itself as the key.
- */
- BUILD_BUG_ON(sizeof(struct lock_class_key) >
- sizeof(struct lockdep_map));
-
- key = lock->key->subkeys + subclass;
-
- hash_head = classhashentry(key);
-
- /*
- * We can walk the hash lockfree, because the hash only
- * grows, and we are careful when adding entries to the end:
- */
- list_for_each_entry(class, hash_head, hash_entry) {
- if (class->key == key) {
- /*
- * Huh! same key, different name? Did someone trample
- * on some memory? We're most confused.
- */
- WARN_ON_ONCE(class->name != lock->name);
- return class;
- }
- }
-
- return NULL;
-}
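
/*
 * Illustration, not part of the original file: how the class-key rule
 * described above looks at lock-init time. A statically declared lock
 * uses its own address as the key; a lock living in dynamically allocated
 * memory gets its key from the static lock_class_key that spin_lock_init()
 * generates at its call site. 'struct example_foo' and example_alloc_foo()
 * are hypothetical; assumes <linux/spinlock.h> and <linux/slab.h>.
 */
static DEFINE_SPINLOCK(example_static_lock);	/* key: the lock object itself */

struct example_foo {
	spinlock_t lock;
};

static struct example_foo *example_alloc_foo(gfp_t gfp)
{
	struct example_foo *foo = kzalloc(sizeof(*foo), gfp);

	if (foo)
		spin_lock_init(&foo->lock);	/* key: per-call-site lock_class_key */
	return foo;
}
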
-
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
-static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
-{
- struct lockdep_subclass_key *key;
- struct list_head *hash_head;
- struct lock_class *class;
- unsigned long flags;
-
- class = look_up_lock_class(lock, subclass);
- if (likely(class))
- goto out_set_class_cache;
-
- /*
- * Debug-check: all keys must be persistent!
- */
- if (!static_obj(lock->key)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
-
- return NULL;
- }
-
- key = lock->key->subkeys + subclass;
- hash_head = classhashentry(key);
-
- raw_local_irq_save(flags);
- if (!graph_lock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- /*
- * We have to do the hash-walk again, to avoid races
- * with another CPU:
- */
- list_for_each_entry(class, hash_head, hash_entry)
- if (class->key == key)
- goto out_unlock_set;
- /*
- * Allocate a new key from the static array, and add it to
- * the hash:
- */
- if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
- if (!debug_locks_off_graph_unlock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- raw_local_irq_restore(flags);
-
- print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
- dump_stack();
- return NULL;
- }
- class = lock_classes + nr_lock_classes++;
- debug_atomic_inc(nr_unused_locks);
- class->key = key;
- class->name = lock->name;
- class->subclass = subclass;
- INIT_LIST_HEAD(&class->lock_entry);
- INIT_LIST_HEAD(&class->locks_before);
- INIT_LIST_HEAD(&class->locks_after);
- class->name_version = count_matching_names(class);
- /*
- * We use RCU's safe list-add method to make
- * parallel walking of the hash-list safe:
- */
- list_add_tail_rcu(&class->hash_entry, hash_head);
- /*
- * Add it to the global list of classes:
- */
- list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
-
- if (verbose(class)) {
- graph_unlock();
- raw_local_irq_restore(flags);
-
- printk("\nnew class %p: %s", class->key, class->name);
- if (class->name_version > 1)
- printk("#%d", class->name_version);
- printk("\n");
- dump_stack();
-
- raw_local_irq_save(flags);
- if (!graph_lock()) {
- raw_local_irq_restore(flags);
- return NULL;
- }
- }
-out_unlock_set:
- graph_unlock();
- raw_local_irq_restore(flags);
-
-out_set_class_cache:
- if (!subclass || force)
- lock->class_cache[0] = class;
- else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
- lock->class_cache[subclass] = class;
-
- /*
- * Hash collision, did we smoke some? We found a class with a matching
- * hash but the subclass -- which is hashed in -- didn't match.
- */
- if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
- return NULL;
-
- return class;
-}
-
-#ifdef CONFIG_PROVE_LOCKING
-/*
- * Allocate a lockdep entry. (assumes the graph_lock is held, returns
- * NULL on failure)
- */
-static struct lock_list *alloc_list_entry(void)
-{
- if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
- if (!debug_locks_off_graph_unlock())
- return NULL;
-
- print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
- dump_stack();
- return NULL;
- }
- return list_entries + nr_list_entries++;
-}
-
-/*
- * Add a new dependency to the head of the list:
- */
-static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip,
- int distance, struct stack_trace *trace)
-{
- struct lock_list *entry;
- /*
- * Lock not present yet - get a new dependency struct and
- * add it to the list:
- */
- entry = alloc_list_entry();
- if (!entry)
- return 0;
-
- entry->class = this;
- entry->distance = distance;
- entry->trace = *trace;
- /*
- * Since we never remove from the dependency list, the list can
- * be walked locklessly by other CPUs; only the allocation
- * must be protected by the spinlock. But this also means
- * a new entry may only be made visible once all writes to the
- * entry itself are visible - hence the RCU op:
- */
- list_add_tail_rcu(&entry->entry, head);
-
- return 1;
-}
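
/*
 * Illustration, not part of the original file: the publish pattern the
 * comment above relies on, in its generic RCU form. A writer fully
 * initialises the entry, then appends it with list_add_tail_rcu() under a
 * lock; readers may walk the list without that lock. The example_* names
 * are hypothetical; assumes <linux/list.h>, <linux/rculist.h>,
 * <linux/rcupdate.h> and <linux/spinlock.h>.
 */
struct example_node {
	int value;
	struct list_head entry;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

static void example_publish(struct example_node *node, int value)
{
	node->value = value;		/* initialise before publishing */
	spin_lock(&example_list_lock);
	list_add_tail_rcu(&node->entry, &example_list);
	spin_unlock(&example_list_lock);
}

static bool example_contains(int value)
{
	struct example_node *node;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &example_list, entry) {
		if (node->value == value) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
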
-
-/*
- * For efficient modular arithmetic, we use a power-of-2 queue size
- */
-#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
-#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
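
/*
 * Illustration, not part of the original file: because the queue size is a
 * power of 2, "(x + 1) & CQ_MASK" equals "(x + 1) % MAX_CIRCULAR_QUEUE_SIZE"
 * while avoiding a division. example_cq_next() is a hypothetical helper.
 */
static inline unsigned int example_cq_next(unsigned int idx)
{
	/* enforce the power-of-2 assumption at compile time */
	BUILD_BUG_ON(MAX_CIRCULAR_QUEUE_SIZE & (MAX_CIRCULAR_QUEUE_SIZE - 1));
	return (idx + 1) & CQ_MASK;
}
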
-
-/*
- * The circular_queue and its helpers are used to implement the
- * breadth-first search (BFS) algorithm, by which we can build
- * the shortest path from the next lock to be acquired back to a
- * previously held lock, if there is a circular dependency between them.
- */
-struct circular_queue {
- unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
- unsigned int front, rear;
-};
-
-static struct circular_queue lock_cq;
-
-unsigned int max_bfs_queue_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static inline void __cq_init(struct circular_queue *cq)
-{
- cq->front = cq->rear = 0;
- lockdep_dependency_gen_id++;
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
- return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
- return ((cq->rear + 1) & CQ_MASK) == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
- if (__cq_full(cq))
- return -1;
-
- cq->element[cq->rear] = elem;
- cq->rear = (cq->rear + 1) & CQ_MASK;
- return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
- if (__cq_empty(cq))
- return -1;
-
- *elem = cq->element[cq->front];
- cq->front = (cq->front + 1) & CQ_MASK;
- return 0;
-}
-
-static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
-{
- return (cq->rear - cq->front) & CQ_MASK;
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
- struct lock_list *parent)
-{
- unsigned long nr;
-
- nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
- lock->parent = parent;
- lock->class->dep_gen_id = lockdep_dependency_gen_id;
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
- unsigned long nr;
-
- nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
- return lock->class->dep_gen_id == lockdep_dependency_gen_id;
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
- return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
- int depth = 0;
- struct lock_list *parent;
-
- while ((parent = get_lock_parent(child))) {
- child = parent;
- depth++;
- }
- return depth;
-}
-
-static int __bfs(struct lock_list *source_entry,
- void *data,
- int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry,
- int forward)
-{
- struct lock_list *entry;
- struct list_head *head;
- struct circular_queue *cq = &lock_cq;
- int ret = 1;
-
- if (match(source_entry, data)) {
- *target_entry = source_entry;
- ret = 0;
- goto exit;
- }
-
- if (forward)
- head = &source_entry->class->locks_after;
- else
- head = &source_entry->class->locks_before;
-
- if (list_empty(head))
- goto exit;
-
- __cq_init(cq);
- __cq_enqueue(cq, (unsigned long)source_entry);
-
- while (!__cq_empty(cq)) {
- struct lock_list *lock;
-
- __cq_dequeue(cq, (unsigned long *)&lock);
-
- if (!lock->class) {
- ret = -2;
- goto exit;
- }
-
- if (forward)
- head = &lock->class->locks_after;
- else
- head = &lock->class->locks_before;
-
- list_for_each_entry(entry, head, entry) {
- if (!lock_accessed(entry)) {
- unsigned int cq_depth;
- mark_lock_accessed(entry, lock);
- if (match(entry, data)) {
- *target_entry = entry;
- ret = 0;
- goto exit;
- }
-
- if (__cq_enqueue(cq, (unsigned long)entry)) {
- ret = -1;
- goto exit;
- }
- cq_depth = __cq_get_elem_count(cq);
- if (max_bfs_queue_depth < cq_depth)
- max_bfs_queue_depth = cq_depth;
- }
- }
- }
-exit:
- return ret;
-}
-
-static inline int __bfs_forwards(struct lock_list *src_entry,
- void *data,
- int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry)
-{
- return __bfs(src_entry, data, match, target_entry, 1);
-
-}
-
-static inline int __bfs_backwards(struct lock_list *src_entry,
- void *data,
- int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry)
-{
- return __bfs(src_entry, data, match, target_entry, 0);
-
-}
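
/*
 * Illustration, not part of the original file: a typical caller pairs
 * __bfs_forwards() with a match callback; the search returns 0 and sets
 * *target_entry on a hit, 1 if nothing matched, and a negative value on
 * internal error (queue overflow or a corrupted entry). The example_*
 * names are hypothetical; the real cycle check uses class_equal() below.
 */
static int example_match_class(struct lock_list *entry, void *data)
{
	return entry->class == data;	/* data: the lock_class we look for */
}

static int example_reaches_class(struct lock_list *root,
				 struct lock_class *target,
				 struct lock_list **target_entry)
{
	return __bfs_forwards(root, target, example_match_class, target_entry);
}
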
-
-/*
- * Recursive, forwards-direction lock-dependency checking, used for
- * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
- * checking.
- */
-
-/*
- * Print a dependency chain entry (this is only done when a deadlock
- * has been detected):
- */
-static noinline int
-print_circular_bug_entry(struct lock_list *target, int depth)
-{
- if (debug_locks_silent)
- return 0;
- printk("\n-> #%u", depth);
- print_lock_name(target->class);
- printk(":\n");
- print_stack_trace(&target->trace, 6);
-
- return 0;
-}
-
-static void
-print_circular_lock_scenario(struct held_lock *src,
- struct held_lock *tgt,
- struct lock_list *prt)
-{
- struct lock_class *source = hlock_class(src);
- struct lock_class *target = hlock_class(tgt);
- struct lock_class *parent = prt->class;
-
- /*
- * In a direct locking problem, where the unsafe_class lock is taken
- * directly under the safe_class lock, all we need to show
- * is the deadlock scenario, as it is obvious that the
- * unsafe lock is taken under the safe lock.
- *
- * But if there is a chain instead, where the safe lock takes
- * an intermediate lock (middle_class) where this lock is
- * not the same as the safe lock, then the lock chain is
- * used to describe the problem. Otherwise we would need
- * to show a different CPU case for each link in the chain
- * from the safe_class lock to the unsafe_class lock.
- */
- if (parent != source) {
- printk("Chain exists of:\n ");
- __print_lock_name(source);
- printk(" --> ");
- __print_lock_name(parent);
- printk(" --> ");
- __print_lock_name(target);
- printk("\n\n");
- }
-
- printk(" Possible unsafe locking scenario:\n\n");
- printk(" CPU0 CPU1\n");
- printk(" ---- ----\n");
- printk(" lock(");
- __print_lock_name(target);
- printk(");\n");
- printk(" lock(");
- __print_lock_name(parent);
- printk(");\n");
- printk(" lock(");
- __print_lock_name(target);
- printk(");\n");
- printk(" lock(");
- __print_lock_name(source);
- printk(");\n");
- printk("\n *** DEADLOCK ***\n\n");
-}
-
-/*
- * When a circular dependency is detected, print the
- * header first:
- */
-static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth,
- struct held_lock *check_src,
- struct held_lock *check_tgt)
-{
- struct task_struct *curr = current;
-
- if (debug_locks_silent)
- return 0;
-
- printk("\n");
- printk("======================================================\n");
- printk("[ INFO: possible circular locking dependency detected ]\n");
- print_kernel_ident();
- printk("-------------------------------------------------------\n");
- printk("%s/%d is trying to acquire lock:\n",
- curr->comm, task_pid_nr(curr));
- print_lock(check_src);
- printk("\nbut task is already holding lock:\n");
- print_lock(check_tgt);
- printk("\nwhich lock already depends on the new lock.\n\n");
- printk("\nthe existing dependency chain (in reverse order) is:\n");
-
- print_circular_bug_entry(entry, depth);
-
- return 0;
-}
-
-static inline int class_equal(struct lock_list *entry, void *data)
-{
- return entry->class == data;
-}