Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/Makefile          |   2
-rw-r--r--  kernel/locking/lockdep.c         |  19
-rw-r--r--  kernel/locking/lockdep_proc.c    |   2
-rw-r--r--  kernel/locking/locktorture.c     | 146
-rw-r--r--  kernel/locking/mcs_spinlock.h    |   2
-rw-r--r--  kernel/locking/mutex.c           |   4
-rw-r--r--  kernel/locking/osq_lock.c        |   4
-rw-r--r--  kernel/locking/rtmutex-debug.c   | 182
-rw-r--r--  kernel/locking/rtmutex-debug.h   |  37
-rw-r--r--  kernel/locking/rtmutex.c         | 432
-rw-r--r--  kernel/locking/rtmutex.h         |  35
-rw-r--r--  kernel/locking/rtmutex_common.h  | 105
-rw-r--r--  kernel/locking/rwsem.c           |   4
-rw-r--r--  kernel/locking/spinlock.c        |   4
14 files changed, 326 insertions(+), 652 deletions(-)
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 8838f1d7c4a2..3572808223e4 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -12,7 +12,6 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o @@ -26,7 +25,6 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o obj-$(CONFIG_RT_MUTEXES) += rtmutex.o -obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ef28a0b9cf1e..48d736aa03b2 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -54,6 +54,7 @@ #include <linux/nmi.h> #include <linux/rcupdate.h> #include <linux/kprobes.h> +#include <linux/lockdep.h> #include <asm/sections.h> @@ -1747,7 +1748,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry, /* * Step 4: if not match, expand the path by adding the - * forward or backwards dependencis in the search + * forward or backwards dependencies in the search * */ first = true; @@ -1916,7 +1917,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the * dependency graph, as any strong path ..-> A -> B ->.. we can get with * having dependency A -> B, we could already get a equivalent path ..-> A -> - * .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant. + * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. * * We need to make sure both the start and the end of A -> .. -> B is not * weaker than A -> B. For the start part, please see the comment in @@ -5253,13 +5254,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read) if (match_held_lock(hlock, lock)) { if (read == -1 || hlock->read == read) - return 1; + return LOCK_STATE_HELD; - return 0; + return LOCK_STATE_NOT_HELD; } } - return 0; + return LOCK_STATE_NOT_HELD; } static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) @@ -5538,10 +5539,14 @@ EXPORT_SYMBOL_GPL(lock_release); noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) { unsigned long flags; - int ret = 0; + int ret = LOCK_STATE_NOT_HELD; + /* + * Avoid false negative lockdep_assert_held() and + * lockdep_assert_not_held(). + */ if (unlikely(!lockdep_enabled())) - return 1; /* avoid false negative lockdep_assert_held() */ + return LOCK_STATE_UNKNOWN; raw_local_irq_save(flags); check_flags(flags); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 02ef87f50df2..806978314496 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) debug_locks); /* - * Zappped classes and lockdep data buffers reuse statistics. + * Zapped classes and lockdep data buffers reuse statistics. 
*/ seq_puts(m, "\n"); seq_printf(m, " zapped classes: %11lu\n", diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 0ab94e1f1276..b3adb40549bf 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -76,13 +76,13 @@ static void lock_torture_cleanup(void); struct lock_torture_ops { void (*init)(void); void (*exit)(void); - int (*writelock)(void); + int (*writelock)(int tid); void (*write_delay)(struct torture_random_state *trsp); void (*task_boost)(struct torture_random_state *trsp); - void (*writeunlock)(void); - int (*readlock)(void); + void (*writeunlock)(int tid); + int (*readlock)(int tid); void (*read_delay)(struct torture_random_state *trsp); - void (*readunlock)(void); + void (*readunlock)(int tid); unsigned long flags; /* for irq spinlocks */ const char *name; @@ -105,7 +105,7 @@ static struct lock_torture_cxt cxt = { 0, 0, false, false, * Definitions for lock torture testing. */ -static int torture_lock_busted_write_lock(void) +static int torture_lock_busted_write_lock(int tid __maybe_unused) { return 0; /* BUGGY, do not use in real life!!! */ } @@ -122,7 +122,7 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_lock_busted_write_unlock(void) +static void torture_lock_busted_write_unlock(int tid __maybe_unused) { /* BUGGY, do not use in real life!!! */ } @@ -145,7 +145,8 @@ static struct lock_torture_ops lock_busted_ops = { static DEFINE_SPINLOCK(torture_spinlock); -static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) +static int torture_spin_lock_write_lock(int tid __maybe_unused) +__acquires(torture_spinlock) { spin_lock(&torture_spinlock); return 0; @@ -169,7 +170,8 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) +static void torture_spin_lock_write_unlock(int tid __maybe_unused) +__releases(torture_spinlock) { spin_unlock(&torture_spinlock); } @@ -185,7 +187,7 @@ static struct lock_torture_ops spin_lock_ops = { .name = "spin_lock" }; -static int torture_spin_lock_write_lock_irq(void) +static int torture_spin_lock_write_lock_irq(int tid __maybe_unused) __acquires(torture_spinlock) { unsigned long flags; @@ -195,7 +197,7 @@ __acquires(torture_spinlock) return 0; } -static void torture_lock_spin_write_unlock_irq(void) +static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused) __releases(torture_spinlock) { spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); @@ -214,7 +216,8 @@ static struct lock_torture_ops spin_lock_irq_ops = { static DEFINE_RWLOCK(torture_rwlock); -static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) +static int torture_rwlock_write_lock(int tid __maybe_unused) +__acquires(torture_rwlock) { write_lock(&torture_rwlock); return 0; @@ -235,12 +238,14 @@ static void torture_rwlock_write_delay(struct torture_random_state *trsp) udelay(shortdelay_us); } -static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) +static void torture_rwlock_write_unlock(int tid __maybe_unused) +__releases(torture_rwlock) { write_unlock(&torture_rwlock); } -static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) +static int torture_rwlock_read_lock(int tid __maybe_unused) +__acquires(torture_rwlock) { read_lock(&torture_rwlock); return 0; @@ -261,7 +266,8 @@ static void torture_rwlock_read_delay(struct torture_random_state *trsp) udelay(shortdelay_us); } -static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) +static void torture_rwlock_read_unlock(int tid __maybe_unused) +__releases(torture_rwlock) { read_unlock(&torture_rwlock); } @@ -277,7 +283,8 @@ static struct lock_torture_ops rw_lock_ops = { .name = "rw_lock" }; -static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) +static int torture_rwlock_write_lock_irq(int tid __maybe_unused) +__acquires(torture_rwlock) { unsigned long flags; @@ -286,13 +293,14 @@ static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) return 0; } -static void torture_rwlock_write_unlock_irq(void) +static void torture_rwlock_write_unlock_irq(int tid __maybe_unused) __releases(torture_rwlock) { write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); } -static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) +static int torture_rwlock_read_lock_irq(int tid __maybe_unused) +__acquires(torture_rwlock) { unsigned long flags; @@ -301,7 +309,7 @@ static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) return 0; } -static void torture_rwlock_read_unlock_irq(void) +static void torture_rwlock_read_unlock_irq(int tid __maybe_unused) __releases(torture_rwlock) { read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); @@ -320,7 +328,8 @@ static struct lock_torture_ops rw_lock_irq_ops = { static DEFINE_MUTEX(torture_mutex); -static int torture_mutex_lock(void) __acquires(torture_mutex) +static int torture_mutex_lock(int tid __maybe_unused) +__acquires(torture_mutex) { mutex_lock(&torture_mutex); return 0; @@ -340,7 +349,8 @@ static void torture_mutex_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_mutex_unlock(void) __releases(torture_mutex) +static void torture_mutex_unlock(int tid __maybe_unused) +__releases(torture_mutex) { mutex_unlock(&torture_mutex); } @@ -357,12 +367,34 @@ static struct lock_torture_ops mutex_lock_ops = { }; #include <linux/ww_mutex.h> +/* + * The torture ww_mutexes should belong to the same lock class as + * torture_ww_class to avoid lockdep problem. The ww_mutex_init() + * function is called for initialization to ensure that. + */ static DEFINE_WD_CLASS(torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); +static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2; +static struct ww_acquire_ctx *ww_acquire_ctxs; + +static void torture_ww_mutex_init(void) +{ + ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class); + ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class); + ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class); + + ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress, + sizeof(*ww_acquire_ctxs), + GFP_KERNEL); + if (!ww_acquire_ctxs) + VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory"); +} + +static void torture_ww_mutex_exit(void) +{ + kfree(ww_acquire_ctxs); +} -static int torture_ww_mutex_lock(void) +static int torture_ww_mutex_lock(int tid) __acquires(torture_ww_mutex_0) __acquires(torture_ww_mutex_1) __acquires(torture_ww_mutex_2) @@ -372,7 +404,7 @@ __acquires(torture_ww_mutex_2) struct list_head link; struct ww_mutex *lock; } locks[3], *ll, *ln; - struct ww_acquire_ctx ctx; + struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; locks[0].lock = &torture_ww_mutex_0; list_add(&locks[0].link, &list); @@ -383,12 +415,12 @@ __acquires(torture_ww_mutex_2) locks[2].lock = &torture_ww_mutex_2; list_add(&locks[2].link, &list); - ww_acquire_init(&ctx, &torture_ww_class); + ww_acquire_init(ctx, &torture_ww_class); list_for_each_entry(ll, &list, link) { int err; - err = ww_mutex_lock(ll->lock, &ctx); + err = ww_mutex_lock(ll->lock, ctx); if (!err) continue; @@ -399,25 +431,29 @@ __acquires(torture_ww_mutex_2) if (err != -EDEADLK) return err; - ww_mutex_lock_slow(ll->lock, &ctx); + ww_mutex_lock_slow(ll->lock, ctx); list_move(&ll->link, &list); } - ww_acquire_fini(&ctx); return 0; } -static void torture_ww_mutex_unlock(void) +static void torture_ww_mutex_unlock(int tid) __releases(torture_ww_mutex_0) __releases(torture_ww_mutex_1) __releases(torture_ww_mutex_2) { + struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; + ww_mutex_unlock(&torture_ww_mutex_0); ww_mutex_unlock(&torture_ww_mutex_1); ww_mutex_unlock(&torture_ww_mutex_2); + ww_acquire_fini(ctx); } static struct lock_torture_ops ww_mutex_lock_ops = { + .init = torture_ww_mutex_init, + .exit = torture_ww_mutex_exit, .writelock = torture_ww_mutex_lock, .write_delay = torture_mutex_delay, .task_boost = torture_boost_dummy, @@ -431,7 +467,8 @@ static struct lock_torture_ops ww_mutex_lock_ops = { #ifdef CONFIG_RT_MUTEXES static DEFINE_RT_MUTEX(torture_rtmutex); -static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) +static int torture_rtmutex_lock(int tid __maybe_unused) +__acquires(torture_rtmutex) { rt_mutex_lock(&torture_rtmutex); return 0; @@ -487,7 +524,8 @@ static void torture_rtmutex_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) +static void torture_rtmutex_unlock(int tid __maybe_unused) +__releases(torture_rtmutex) { rt_mutex_unlock(&torture_rtmutex); } @@ -505,7 +543,8 @@ static struct lock_torture_ops rtmutex_lock_ops = { #endif static DECLARE_RWSEM(torture_rwsem); -static int torture_rwsem_down_write(void) __acquires(torture_rwsem) +static int torture_rwsem_down_write(int tid __maybe_unused) +__acquires(torture_rwsem) { down_write(&torture_rwsem); return 0; @@ -525,12 +564,14 @@ static void torture_rwsem_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_rwsem_up_write(void) __releases(torture_rwsem) +static void torture_rwsem_up_write(int tid __maybe_unused) +__releases(torture_rwsem) { up_write(&torture_rwsem); } -static int torture_rwsem_down_read(void) __acquires(torture_rwsem) +static int torture_rwsem_down_read(int tid __maybe_unused) +__acquires(torture_rwsem) { down_read(&torture_rwsem); return 0; @@ -550,7 +591,8 @@ static void torture_rwsem_read_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_rwsem_up_read(void) __releases(torture_rwsem) +static void torture_rwsem_up_read(int tid __maybe_unused) +__releases(torture_rwsem) { up_read(&torture_rwsem); } @@ -579,24 +621,28 @@ static void torture_percpu_rwsem_exit(void) percpu_free_rwsem(&pcpu_rwsem); } -static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) +static int torture_percpu_rwsem_down_write(int tid __maybe_unused) +__acquires(pcpu_rwsem) { percpu_down_write(&pcpu_rwsem); return 0; } -static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) +static void torture_percpu_rwsem_up_write(int tid __maybe_unused) +__releases(pcpu_rwsem) { percpu_up_write(&pcpu_rwsem); } -static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) +static int torture_percpu_rwsem_down_read(int tid __maybe_unused) +__acquires(pcpu_rwsem) { percpu_down_read(&pcpu_rwsem); return 0; } -static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) +static void torture_percpu_rwsem_up_read(int tid __maybe_unused) +__releases(pcpu_rwsem) { percpu_up_read(&pcpu_rwsem); } @@ -621,6 +667,7 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = { static int lock_torture_writer(void *arg) { struct lock_stress_stats *lwsp = arg; + int tid = lwsp - cxt.lwsa; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("lock_torture_writer task started"); @@ -631,7 +678,7 @@ static int lock_torture_writer(void *arg) schedule_timeout_uninterruptible(1); cxt.cur_ops->task_boost(&rand); - cxt.cur_ops->writelock(); + cxt.cur_ops->writelock(tid); if (WARN_ON_ONCE(lock_is_write_held)) lwsp->n_lock_fail++; lock_is_write_held = true; @@ -642,7 +689,7 @@ static int lock_torture_writer(void *arg) cxt.cur_ops->write_delay(&rand); lock_is_write_held = false; WRITE_ONCE(last_lock_release, jiffies); - cxt.cur_ops->writeunlock(); + cxt.cur_ops->writeunlock(tid); stutter_wait("lock_torture_writer"); } while (!torture_must_stop()); @@ -659,6 +706,7 @@ static int lock_torture_writer(void *arg) static int lock_torture_reader(void *arg) { struct lock_stress_stats *lrsp = arg; + int tid = lrsp - cxt.lrsa; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("lock_torture_reader task started"); @@ -668,7 +716,7 @@ static int lock_torture_reader(void *arg) if ((torture_random(&rand) & 0xfffff) == 0) schedule_timeout_uninterruptible(1); - 
cxt.cur_ops->readlock(); + cxt.cur_ops->readlock(tid); lock_is_read_held = true; if (WARN_ON_ONCE(lock_is_write_held)) lrsp->n_lock_fail++; /* rare, but... */ @@ -676,7 +724,7 @@ static int lock_torture_reader(void *arg) lrsp->n_lock_acquired++; cxt.cur_ops->read_delay(&rand); lock_is_read_held = false; - cxt.cur_ops->readunlock(); + cxt.cur_ops->readunlock(tid); stutter_wait("lock_torture_reader"); } while (!torture_must_stop()); @@ -891,16 +939,16 @@ static int __init lock_torture_init(void) goto unwind; } - if (cxt.cur_ops->init) { - cxt.cur_ops->init(); - cxt.init_called = true; - } - if (nwriters_stress >= 0) cxt.nrealwriters_stress = nwriters_stress; else cxt.nrealwriters_stress = 2 * num_online_cpus(); + if (cxt.cur_ops->init) { + cxt.cur_ops->init(); + cxt.init_called = true; + } + #ifdef CONFIG_DEBUG_MUTEXES if (str_has_prefix(torture_type, "mutex")) cxt.debug_lock = true; diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 5e10153b4d3c..85251d8771d9 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -7,7 +7,7 @@ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock * with the desirable properties of being fair, and with each cpu trying * to acquire the lock spinning on a local variable. - * It avoids expensive cache bouncings that common test-and-set spin-lock + * It avoids expensive cache bounces that common test-and-set spin-lock * implementations incur. */ #ifndef __LINUX_MCS_SPINLOCK_H diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 622ebdfcd083..cb6b112ce155 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner) } /* - * Trylock variant that retuns the owning task on failure. + * Trylock variant that returns the owning task on failure. */ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) { @@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, /* * Give up ownership to a specific task, when @task = NULL, this is equivalent - * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves + * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves * WAITERS. Provides RELEASE semantics like a regular unlock, the * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. */ diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index 1de006ed3aa8..d5610ad52b92 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c @@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) */ /* - * Wait to acquire the lock or cancelation. Note that need_resched() + * Wait to acquire the lock or cancellation. Note that need_resched() * will come with an IPI, which will wake smp_cond_load_relaxed() if it * is implemented with a monitor-wait. vcpu_is_preempted() relies on * polling, be careful. @@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) /* * We can only fail the cmpxchg() racing against an unlock(), - * in which case we should observe @node->locked becomming + * in which case we should observe @node->locked becoming * true. 
*/ if (smp_load_acquire(&node->locked)) diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c deleted file mode 100644 index 36e69100e8e0..000000000000 --- a/kernel/locking/rtmutex-debug.c +++ /dev/null @@ -1,182 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * RT-Mutexes: blocking mutual exclusion locks with PI support - * - * started by Ingo Molnar and Thomas Gleixner: - * - * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> - * - * This code is based on the rt.c implementation in the preempt-rt tree. - * Portions of said code are - * - * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey - * Copyright (C) 2006 Esben Nielsen - * Copyright (C) 2006 Kihon Technologies Inc., - * Steven Rostedt <rostedt@goodmis.org> - * - * See rt.c in preempt-rt for proper credits and further information - */ -#include <linux/sched.h> -#include <linux/sched/rt.h> -#include <linux/sched/debug.h> -#include <linux/delay.h> -#include <linux/export.h> -#include <linux/spinlock.h> -#include <linux/kallsyms.h> -#include <linux/syscalls.h> -#include <linux/interrupt.h> -#include <linux/rbtree.h> -#include <linux/fs.h> -#include <linux/debug_locks.h> - -#include "rtmutex_common.h" - -static void printk_task(struct task_struct *p) -{ - if (p) - printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); - else - printk("<none>"); -} - -static void printk_lock(struct rt_mutex *lock, int print_owner) -{ - if (lock->name) - printk(" [%p] {%s}\n", - lock, lock->name); - else - printk(" [%p] {%s:%d}\n", - lock, lock->file, lock->line); - - if (print_owner && rt_mutex_owner(lock)) { - printk(".. ->owner: %p\n", lock->owner); - printk(".. held by: "); - printk_task(rt_mutex_owner(lock)); - printk("\n"); - } -} - -void rt_mutex_debug_task_free(struct task_struct *task) -{ - DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); - DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); -} - -/* - * We fill out the fields in the waiter to store the information about - * the deadlock. We print when we return. act_waiter can be NULL in - * case of a remove waiter operation. 
- */ -void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *act_waiter, - struct rt_mutex *lock) -{ - struct task_struct *task; - - if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter) - return; - - task = rt_mutex_owner(act_waiter->lock); - if (task && task != current) { - act_waiter->deadlock_task_pid = get_pid(task_pid(task)); - act_waiter->deadlock_lock = lock; - } -} - -void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) -{ - struct task_struct *task; - - if (!waiter->deadlock_lock || !debug_locks) - return; - - rcu_read_lock(); - task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); - if (!task) { - rcu_read_unlock(); - return; - } - - if (!debug_locks_off()) { - rcu_read_unlock(); - return; - } - - pr_warn("\n"); - pr_warn("============================================\n"); - pr_warn("WARNING: circular locking deadlock detected!\n"); - pr_warn("%s\n", print_tainted()); - pr_warn("--------------------------------------------\n"); - printk("%s/%d is deadlocking current task %s/%d\n\n", - task->comm, task_pid_nr(task), - current->comm, task_pid_nr(current)); - - printk("\n1) %s/%d is trying to acquire this lock:\n", - current->comm, task_pid_nr(current)); - printk_lock(waiter->lock, 1); - - printk("\n2) %s/%d is blocked on this lock:\n", - task->comm, task_pid_nr(task)); - printk_lock(waiter->deadlock_lock, 1); - - debug_show_held_locks(current); - debug_show_held_locks(task); - - printk("\n%s/%d's [blocked] stackdump:\n\n", - task->comm, task_pid_nr(task)); - show_stack(task, NULL, KERN_DEFAULT); - printk("\n%s/%d's [current] stackdump:\n\n", - current->comm, task_pid_nr(current)); - dump_stack(); - debug_show_all_locks(); - rcu_read_unlock(); - - printk("[ turning off deadlock detection." - "Please report this trace. ]\n\n"); -} - -void debug_rt_mutex_lock(struct rt_mutex *lock) -{ -} - -void debug_rt_mutex_unlock(struct rt_mutex *lock) -{ - DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); -} - -void -debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) -{ -} - -void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) -{ - DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); -} - -void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) -{ - memset(waiter, 0x11, sizeof(*waiter)); - waiter->deadlock_task_pid = NULL; -} - -void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) -{ - put_pid(waiter->deadlock_task_pid); - memset(waiter, 0x22, sizeof(*waiter)); -} - -void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) -{ - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lock->name = name; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - lockdep_init_map(&lock->dep_map, name, key, 0); -#endif -} - diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h deleted file mode 100644 index fc549713bba3..000000000000 --- a/kernel/locking/rtmutex-debug.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * RT-Mutexes: blocking mutual exclusion locks with PI support - * - * started by Ingo Molnar and Thomas Gleixner: - * - * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> - * - * This file contains macros used solely by rtmutex.c. Debug version. 
- */ - -extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); -extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); -extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); -extern void debug_rt_mutex_lock(struct rt_mutex *lock); -extern void debug_rt_mutex_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner); -extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *waiter, - struct rt_mutex *lock); -extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); -# define debug_rt_mutex_reset_waiter(w) \ - do { (w)->deadlock_lock = NULL; } while (0) - -static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, - enum rtmutex_chainwalk walk) -{ - return (waiter != NULL); -} - -static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) -{ - debug_rt_mutex_print_deadlock(w); -} diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 48fff6437901..406818196a9f 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -49,7 +49,7 @@ * set this bit before looking at the lock. */ -static void +static __always_inline void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) { unsigned long val = (unsigned long)owner; @@ -60,13 +60,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) WRITE_ONCE(lock->owner, (struct task_struct *)val); } -static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); } -static void fixup_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -149,7 +149,7 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) * all future threads that attempt to [Rmw] the lock to the slowpath. As such * relaxed semantics suffice. */ -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -165,8 +165,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) * 2) Drop lock->wait_lock * 3) Try to unlock the lock with cmpxchg */ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { struct task_struct *owner = rt_mutex_owner(lock); @@ -204,7 +204,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, # define rt_mutex_cmpxchg_acquire(l,c,n) (0) # define rt_mutex_cmpxchg_release(l,c,n) (0) -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); @@ -213,8 +213,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) /* * Simple slow path only version: lock->owner is protected by lock->wait_lock. 
*/ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { lock->owner = NULL; @@ -229,9 +229,8 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, #define task_to_waiter(p) \ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } -static inline int -rt_mutex_waiter_less(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio < right->prio) return 1; @@ -248,9 +247,8 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, return 0; } -static inline int -rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio != right->prio) return 0; @@ -270,18 +268,18 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, #define __node_2_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, tree_entry) -static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); } -static void +static __always_inline void rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->tree_entry)) @@ -294,18 +292,19 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) #define __node_2_pi_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, pi_tree_entry) -static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool +__pi_waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); |