| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-28 12:37:53 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-28 12:37:53 -0700 |
| commit | 0ff0edb550e256597e505eff308f90d9a0b6677c (patch) | |
| tree | 6d96f53e70f2bdec49627e30c2645ee97b987848 | |
| parent | 9a45da9270b64b14e154093c28f746d861ab8c61 (diff) | |
| parent | f4abe9967c6fdb511ee567e129a014b60945ab93 (diff) | |
Merge tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- rtmutex cleanup & spring cleaning pass that removes ~400 lines of
code
- Futex simplifications & cleanups
- Add debugging to the CSD code, to help track down a tenacious race
(or hw problem)
- Add lockdep_assert_not_held(), to allow code to require a lock to not
  be held, and propagate this into the ath10k driver (a usage sketch
  follows this list)
- Misc LKMM documentation updates
- Misc KCSAN updates: cleanups & documentation updates
- Misc fixes and cleanups
- Fix locktorture bugs with ww_mutexes
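Of the items above, lockdep_assert_not_held() is the most visible new API: it is the mirror image of lockdep_assert_held(), and this merge uses it to annotate ath10k_drain_tx() (see the ath10k mac.c hunk in the diff below). A minimal sketch of the intended usage, with a hypothetical mutex and function that are not taken from the merge:

```c
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(conf_mutex);

/*
 * Hypothetical example: must not be called with conf_mutex held,
 * because work flushed from here may itself take conf_mutex.
 */
static void drain_tx(void)
{
	/* WARNs (when lockdep is enabled) if conf_mutex *is* held here. */
	lockdep_assert_not_held(&conf_mutex);

	/* ... flush/cancel work that may acquire conf_mutex ... */
}
```

Note how the LOCK_STATE_* constants in the lockdep.h hunk below keep this assertion conservative: when lockdep cannot tell whether the lock is held (LOCK_STATE_UNKNOWN), neither lockdep_assert_held() nor lockdep_assert_not_held() fires, avoiding false positives.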
* tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
kcsan: Fix printk format string
static_call: Relax static_call_update() function argument type
static_call: Fix unused variable warn w/o MODULE
locking/rtmutex: Clean up signal handling in __rt_mutex_slowlock()
locking/rtmutex: Restrict the trylock WARN_ON() to debug
locking/rtmutex: Fix misleading comment in rt_mutex_postunlock()
locking/rtmutex: Consolidate the fast/slowpath invocation
locking/rtmutex: Make text section and inlining consistent
locking/rtmutex: Move debug functions as inlines into common header
locking/rtmutex: Decrapify __rt_mutex_init()
locking/rtmutex: Remove pointless CONFIG_RT_MUTEXES=n stubs
locking/rtmutex: Inline chainwalk depth check
locking/rtmutex: Move rt_mutex_debug_task_free() to rtmutex.c
locking/rtmutex: Remove empty and unused debug stubs
locking/rtmutex: Consolidate rt_mutex_init()
locking/rtmutex: Remove output from deadlock detector
locking/rtmutex: Remove rtmutex deadlock tester leftovers
locking/rtmutex: Remove rt_mutex_timed_lock()
MAINTAINERS: Add myself as futex reviewer
locking/mutex: Remove repeated declaration
...
44 files changed, 1246 insertions, 827 deletions
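One commit above, "static_call: Relax static_call_update() function argument type", changes static_call_update()'s type check from a BUILD_BUG_ON(!__same_type(...)) to an implicit pointer assignment (see the include/linux/static_call.h hunk below), so any function pointer assignable to the trampoline's type is accepted. For orientation, a hedged sketch of the API being relaxed; my_op, my_add and my_mul are hypothetical:

```c
#include <linux/static_call.h>

static int my_add(int a, int b) { return a + b; }
static int my_mul(int a, int b) { return a * b; }

/* Define a static call site whose initial target is my_add(). */
DEFINE_STATIC_CALL(my_op, my_add);

static int my_demo(void)
{
	int v = static_call(my_op)(2, 3);	/* runs my_add(): 5 */

	/* Patch every my_op call site to target my_mul() instead. */
	static_call_update(my_op, my_mul);

	return v + static_call(my_op)(2, 3);	/* runs my_mul(): 6 */
}
```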
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fa08ec0dfbe7..54582ca6c4f9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -782,6 +782,16 @@
 	cs89x0_media=	[HW,NET]
 			Format: { rj45 | aui | bnc }
 
+	csdlock_debug=	[KNL] Enable debug add-ons of cross-CPU function call
+			handling. When switched on, additional debug data is
+			printed to the console in case a hanging CPU is
+			detected, and that CPU is pinged again in order to try
+			to resolve the hang situation.
+			0: disable csdlock debugging (default)
+			1: enable basic csdlock debugging (minor impact)
+			ext: enable extended csdlock debugging (more impact,
+			     but more data)
+
 	dasd=		[HW,NET]
 			See header of drivers/s390/block/dasd_devmap.c.
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index be7a0b0e1f28..d85ce238ace7 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright (C) 2019, Google LLC.
+
 The Kernel Concurrency Sanitizer (KCSAN)
 ========================================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index e32c844a154a..85a39e13c6c6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7452,6 +7452,7 @@ M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Ingo Molnar <mingo@redhat.com>
 R:	Peter Zijlstra <peterz@infradead.org>
 R:	Darren Hart <dvhart@infradead.org>
+R:	Davidlohr Bueso <dave@stgolabs.net>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 8f009e788ad4..f610a773f2be 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -22,7 +22,7 @@
  * assembler to insert a extra (16-bit) IT instruction, depending on the
  * presence or absence of neighbouring conditional instructions.
  *
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 5ce342b91ff3..610a05374c02 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 #include <linux/stringify.h>
 #include <linux/types.h>
 
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte " __stringify(BYTES_NOP5) "\n\t"
@@ -30,7 +30,7 @@ l_yes:
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index bb6c5ee43ac0..5ce4f8d038b9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4727,6 +4727,8 @@ out:
 /* Must not be called with conf_mutex held as workers can use that also. */
 void ath10k_drain_tx(struct ath10k *ar)
 {
+	lockdep_assert_not_held(&ar->conf_mutex);
+
 	/* make sure rcu-protected mac80211 tx path itself is drained */
 	synchronize_net();
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index cf14840609ce..9fd0ad80fef6 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -1,4 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _LINUX_KCSAN_CHECKS_H
 #define _LINUX_KCSAN_CHECKS_H
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 53340d8789f9..fc266ecb2a4d 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -1,4 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up runtime. See kcsan-checks.h for explicit checks and
+ * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _LINUX_KCSAN_H
 #define _LINUX_KCSAN_H
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1c746139d5e3..5cf387813754 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
 extern void lockdep_init_task(struct task_struct *task);
 
 /*
- * Split the recrursion counter in two to readily detect 'off' vs recursion.
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
  */
 #define LOCKDEP_RECURSION_BITS	16
 #define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
@@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 extern void lock_release(struct lockdep_map *lock, unsigned long ip);
 
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN	-1
+#define LOCK_STATE_NOT_HELD	0
+#define LOCK_STATE_HELD		1
+
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
@@ -301,8 +306,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
-#define lockdep_assert_held(l)	do {				\
-		WARN_ON(debug_locks && !lockdep_is_held(l));	\
+#define lockdep_assert_held(l)	do {					\
+		WARN_ON(debug_locks &&					\
+			lockdep_is_held(l) == LOCK_STATE_NOT_HELD);	\
+	} while (0)
+
+#define lockdep_assert_not_held(l)	do {				\
+		WARN_ON(debug_locks &&					\
+			lockdep_is_held(l) == LOCK_STATE_HELD);		\
 	} while (0)
 
 #define lockdep_assert_held_write(l)	do {			\
@@ -397,7 +408,8 @@ extern int lockdep_is_held(const void *);
 #define lockdep_is_held_type(l, r)		(1)
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l)	do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l)	do { (void)(l); } while (0)
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 #define lockdep_assert_none_held_once()	do { } while (0)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 515cff77a4f4..e19323521f9c 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,7 @@
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
 
+struct ww_class;
 struct ww_acquire_ctx;
 
 /*
@@ -65,9 +66,6 @@
 #endif
 };
 
-struct ww_class;
-struct ww_acquire_ctx;
-
 struct ww_mutex {
 	struct mutex base;
 	struct ww_acquire_ctx *ctx;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 6fd615a0eea9..d1672de9ca89 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -31,12 +31,6 @@ struct rt_mutex {
 	raw_spinlock_t		wait_lock;
 	struct rb_root_cached	waiters;
 	struct task_struct	*owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-	int			save_state;
-	const char		*name, *file;
-	int			line;
-	void			*magic;
-#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
@@ -46,35 +40,17 @@ struct rt_mutex_waiter;
 struct hrtimer_sleeper;
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
- extern int rt_mutex_debug_check_no_locks_freed(const void *from,
-						unsigned long len);
- extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
- static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
-						       unsigned long len)
- {
-	return 0;
- }
-# define rt_mutex_debug_check_no_locks_held(task)	do { } while (0)
+static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
 #endif
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
-	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
+#define rt_mutex_init(mutex) \
 do { \
 	static struct lock_class_key __key; \
 	__rt_mutex_init(mutex, __func__, &__key); \
 } while (0)
 
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
-#else
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL, NULL)
-# define rt_mutex_debug_task_free(t)		do { } while (0)
-#endif
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
 	, .dep_map = { .name = #mutexname }
@@ -86,7 +62,6 @@ do { \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .waiters = RB_ROOT_CACHED \
 	, .owner = NULL \
-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
 
 #define DEFINE_RT_MUTEX(mutexname) \
@@ -104,7 +79,6 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 }
 
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
-extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
@@ -115,9 +89,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
 #endif
 
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
-			       struct hrtimer_sleeper *timeout);
-
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 
 extern void rt_mutex_unlock(struct rt_mutex *lock);
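The rtmutex.h hunks above leave a single rt_mutex_init() that always sets up a lockdep class key, and remove rt_mutex_destroy() and rt_mutex_timed_lock() entirely. A short sketch of the surviving public API; the lock names are hypothetical:

```c
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(static_rtlock);	/* static initializer, kept above */
static struct rt_mutex dyn_rtlock;	/* initialized at runtime below */

static void rtlock_demo(void)
{
	rt_mutex_init(&dyn_rtlock);	/* one definition now, debug or not */

	rt_mutex_lock(&static_rtlock);
	/* ... critical section with priority inheritance ... */
	rt_mutex_unlock(&static_rtlock);
}
```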
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 4c715be48717..a66038d88878 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -110,7 +110,7 @@ do {							\
 
 /*
  * This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
  * rwsem to see if somebody from an incompatible type is wanting access to the
  * lock.
  */
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index e01b61ab86b1..fc94faa53b5b 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -118,9 +118,9 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
 
 #define static_call_update(name, func)					\
 ({									\
-	BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));	\
+	typeof(&STATIC_CALL_TRAMP(name)) __F = (func);			\
 	__static_call_update(&STATIC_CALL_KEY(name),			\
-			     STATIC_CALL_TRAMP_ADDR(name), func);	\
+			     STATIC_CALL_TRAMP_ADDR(name), __F);	\
 })
 
 #define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 6ecf2a0220db..b77f39f319ad 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -48,39 +48,26 @@ struct ww_acquire_ctx {
 #endif
 };
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
-		, .ww_class = class
-#else
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
-#endif
-
 #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)	\
 		{ .stamp = ATOMIC_LONG_INIT(0) \
 		, .acquire_name = #ww_class "_acquire" \
 		, .mutex_name = #ww_class "_mutex" \
 		, .is_wait_die = _is_wait_die }
 
-#define __WW_MUTEX_INITIALIZER(lockname, class) \
-		{ .base =  __MUTEX_INITIALIZER(lockname.base) \
-		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
-
 #define DEFINE_WD_CLASS(classname) \
 	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
 
 #define DEFINE_WW_CLASS(classname) \
 	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
 
-#define DEFINE_WW_MUTEX(mutexname, ww_class) \
-	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
-
 /**
  * ww_mutex_init - initialize the w/w mutex
  * @lock: the mutex to be initialized
  * @ww_class: the w/w class the mutex should belong to
  *
  * Initialize the w/w mutex to unlocked state and associate it with the given
- * class.
+ * class. Static define macro for w/w mutex is not provided and this function
+ * is the only way to properly initialize the w/w mutex.
  *
  * It is not allowed to initialize an already locked mutex.
  */
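With __WW_MUTEX_INITIALIZER() and DEFINE_WW_MUTEX() removed in the ww_mutex.h hunk above, the updated kerneldoc makes ww_mutex_init() the only way to properly initialize a w/w mutex. A minimal sketch; the class and lock names are hypothetical:

```c
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* classes keep a static initializer */
static struct ww_mutex demo_ww_lock;	/* ...the mutex itself no longer does */

static void demo_ww_setup(void)
{
	/* Runtime init: unlocked, and associated with demo_ww_class. */
	ww_mutex_init(&demo_ww_lock, &demo_ww_class);
}
```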
diff --git a/kernel/futex.c b/kernel/futex.c
index 00febd6dea9c..c98b825da9cf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -981,6 +981,7 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
  * p->pi_lock:
  *
  *	p->pi_state_list	-> pi_state->list, relation
+ *	pi_mutex->owner		-> pi_state->owner, relation
  *
  * pi_state->refcount:
  *
@@ -1494,13 +1495,14 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
 	u32 curval, newval;
+	struct rt_mutex_waiter *top_waiter;
 	struct task_struct *new_owner;
 	bool postunlock = false;
 	DEFINE_WAKE_Q(wake_q);
 	int ret = 0;
 
-	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-	if (WARN_ON_ONCE(!new_owner)) {
+	top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
+	if (WARN_ON_ONCE(!top_waiter)) {
 		/*
 		 * As per the comment in futex_unlock_pi() this should not happen.
 		 *
@@ -1513,6 +1515,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_
 		goto out_unlock;
 	}
 
+	new_owner = top_waiter->task;
+
 	/*
 	 * We pass it to the next owner. The WAITERS bit is always kept
 	 * enabled while there is PI state around. We cleanup the owner
@@ -2315,19 +2319,15 @@ retry:
 
 /*
  * PI futexes can not be requeued and must remove themself from the
- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
- * and dropped here.
+ * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
  */
 static void unqueue_me_pi(struct futex_q *q)
-	__releases(q->lock_ptr)
 {
 	__unqueue_futex(q);
 
 	BUG_ON(!q->pi_state);
 	put_pi_state(q->pi_state);
 	q->pi_state = NULL;
-
-	spin_unlock(q->lock_ptr);
 }
 
 static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
@@ -2909,8 +2909,8 @@ no_block:
 	if (res)
 		ret = (res < 0) ? res : 0;
 
-	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
+	spin_unlock(q.lock_ptr);
 	goto out;
 
 out_unlock_put_key:
@@ -3237,15 +3237,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	 * reference count.
 	 */
 
-	/* Check if the requeue code acquired the second futex for us. */
+	/*
+	 * Check if the requeue code acquired the second futex for us and do
+	 * any pertinent fixup.
+	 */
 	if (!q.rt_waiter) {
-		/*
-		 * Got the lock. We might not be the anticipated owner if we
-		 * did a lock-steal - fix up the PI-state in that case.
-		 */
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
-			ret = fixup_pi_state_owner(uaddr2, &q, current);
+			ret = fixup_owner(uaddr2, &q, true);
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
@@ -3287,8 +3286,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		if (res)
 			ret = (res < 0) ? res : 0;
 
-		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);
+		spin_unlock(q.lock_ptr);
 	}
 
 	if (ret == -EINTR) {
diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
index 65ca5539c470..c2bb07f5bcc7 100644
--- a/kernel/kcsan/Makefile
+++ b/kernel/kcsan/Makefile
@@ -13,5 +13,5 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
 obj-y := core.o debugfs.o report.o
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
 
-CFLAGS_kcsan-test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
-obj-$(CONFIG_KCSAN_TEST) += kcsan-test.o
+CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index 75fe701f4127..530ae1bda8e7 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rules for implicitly atomic memory accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _KERNEL_KCSAN_ATOMIC_H
 #define _KERNEL_KCSAN_ATOMIC_H
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3bf98db9c702..45c821d4e8bd 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN core runtime.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #define pr_fmt(fmt) "kcsan: " fmt
 
@@ -639,8 +644,6 @@ void __init kcsan_init(void)
 
 	BUG_ON(!in_task());
 
-	kcsan_debugfs_init();
-
 	for_each_possible_cpu(cpu)
 		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 3c8093a371b1..c1dd02f3be8b 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN debugfs interface.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #define pr_fmt(fmt) "kcsan: " fmt
 
@@ -261,7 +266,9 @@ static const struct file_operations debugfs_ops =
 	.release = single_release
 };
 
-void __init kcsan_debugfs_init(void)
+static void __init kcsan_debugfs_init(void)
 {
 	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
 }
+
+late_initcall(kcsan_debugfs_init);
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index 7ee405524904..170a2bb22f53 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN watchpoint encoding.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _KERNEL_KCSAN_ENCODING_H
 #define _KERNEL_KCSAN_ENCODING_H
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 8d4bf3431b3c..9881099d4179 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-
 /*
  * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please
  * see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
  */
 
 #ifndef _KERNEL_KCSAN_KCSAN_H
@@ -31,11 +32,6 @@ void kcsan_save_irqtrace(struct task_struct *task);
 void kcsan_restore_irqtrace(struct task_struct *task);
 
 /*
- * Initialize debugfs file.
- */
-void kcsan_debugfs_init(void);
-
-/*
  * Statistics counters displayed via debugfs; should only be modified in
  * slow-paths.
  */
diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan_test.c
index ebe7fd245104..8bcffbdef3d3 100644
--- a/kernel/kcsan/kcsan-test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -13,6 +13,8 @@
  * Author: Marco Elver <elver@google.com>
  */
 
+#define pr_fmt(fmt) "kcsan_test: " fmt
+
 #include <kunit/test.h>
 #include <linux/jiffies.h>
 #include <linux/kcsan-checks.h>
@@ -951,22 +953,53 @@ static void test_atomic_builtins(struct kunit *test)
 }
 
 /*
- * Each test case is run with different numbers of threads. Until KUnit supports
- * passing arguments for each test case, we encode #threads in the test case
- * name (read by get_num_threads()). [The '-' was chosen as a stylistic
- * preference to separate test name and #threads.]
+ * Generate thread counts for all test cases. Values generated are in interval
+ * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
  *
  * The thread counts are chosen to cover potentially interesting boundaries and
- * corner cases (range 2-5), and then stress the system with larger counts.
+ * corner cases (2 to 5), and then stress the system with larger counts.
