From 258ca95e2cd9a0fcc4508a1bf1742b1a3e9a7bbb Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 23 Feb 2021 01:10:04 +0100
Subject: timer: Revert "timer: Add timer_curr_running()"

This reverts commit dcd42591ebb8a25895b551a5297ea9c24414ba54.
The only user was RCU/nocb.

Signed-off-by: Frederic Weisbecker
Cc: Josh Triplett
Cc: Lai Jiangshan
Cc: Joel Fernandes
Cc: Neeraj Upadhyay
Cc: Boqun Feng
Cc: Thomas Gleixner
Signed-off-by: Paul E. McKenney
---
 include/linux/timer.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4118a97e62fb..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
 
 #define del_singleshot_timer_sync(t) del_timer_sync(t)
 
-extern bool timer_curr_running(struct timer_list *timer);
-
 extern void init_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-- cgit v1.2.3

From 7bf0a6141ab9c1d113bd85d6d13d43903a4278ba Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 9 Apr 2021 00:38:58 +0200
Subject: srcu: Unconditionally embed struct lockdep_map

Since struct lockdep_map has zero size when CONFIG_DEBUG_LOCK_ALLOC=n,
this commit removes the #ifdef from the srcu_struct structure's
->dep_map. This change will simplify further manipulations of this
field.

Signed-off-by: Frederic Weisbecker
Cc: Uladzislau Rezki
Cc: Boqun Feng
Cc: Lai Jiangshan
Cc: Neeraj Upadhyay
Cc: Josh Triplett
Cc: Joel Fernandes
Signed-off-by: Paul E. McKenney
---
 include/linux/srcutree.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..cb1f4351e8ba 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -82,9 +82,7 @@ struct srcu_struct {
 					/* callback for the barrier */
 					/* operation. */
 	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 /* Values for state variable (bottom bits of ->srcu_gp_seq). */
-- cgit v1.2.3
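The zero-size property that motivates the change above can be checked in
isolation. The following is an illustrative userspace sketch, not the
kernel's definitions: it assumes a GNU C compiler, under which an empty
structure occupies zero bytes, so embedding the dummy map unconditionally
does not grow the enclosing structure.

/* Illustrative sketch only -- hypothetical stand-ins for the kernel types. */
#include <stdio.h>

#ifdef DEBUG_LOCK_ALLOC			/* stand-in for CONFIG_DEBUG_LOCK_ALLOC */
struct lockdep_map { const char *name; unsigned int class_idx; };
#else
struct lockdep_map { };			/* empty struct: zero bytes under GNU C */
#endif

struct srcu_struct_like {
	unsigned long srcu_gp_seq;	/* placeholder for the real fields */
	struct lockdep_map dep_map;	/* embedded unconditionally */
};

int main(void)
{
	printf("sizeof(struct lockdep_map)      = %zu\n", sizeof(struct lockdep_map));
	printf("sizeof(struct srcu_struct_like) = %zu\n", sizeof(struct srcu_struct_like));
	return 0;
}

Built without -DDEBUG_LOCK_ALLOC, the map reports zero bytes and the
enclosing structure is no larger than it would be without the member,
which is why the #ifdef around ->dep_map bought nothing.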
From 8e9c01c717df7e05c5bd1ca86aaa3a74b31f37f1 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 9 Apr 2021 00:38:59 +0200
Subject: srcu: Initialize SRCU after timers

Once srcu_init() is called, the SRCU core will make use of delayed
workqueues, which rely on timers. However init_timers() is called
several steps after rcu_init(). This means that a call_srcu() after
rcu_init() but before init_timers() would find itself within a
dangerously uninitialized timer core. This commit therefore creates a
separate call to srcu_init() after init_timer() completes, which
ensures that we stay in early SRCU mode until timers are safe(r).

Signed-off-by: Frederic Weisbecker
Cc: Uladzislau Rezki
Cc: Boqun Feng
Cc: Lai Jiangshan
Cc: Neeraj Upadhyay
Cc: Josh Triplett
Cc: Joel Fernandes
Signed-off-by: Paul E. McKenney
---
 include/linux/srcu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..e6011a9975af 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /**
-- cgit v1.2.3
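To make the ordering hazard concrete, here is a deliberately simplified
userspace model of the "early SRCU mode" idea described above. It is not
the kernel's implementation, and every name in it is hypothetical:
requests that arrive before the timer machinery is ready are parked and
only handed over once the late srcu_init()-style call runs.

/* Hypothetical model of early-mode deferral -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool timers_ready;		/* set only after "timer init" has run */
static int early_queue[16];
static int nr_early;

static void queue_on_timer_core(int req)
{
	printf("request %d queued on initialized timer core\n", req);
}

static void call_srcu_like(int req)
{
	if (!timers_ready) {		/* early mode: do not touch timers yet */
		early_queue[nr_early++] = req;
		return;
	}
	queue_on_timer_core(req);
}

static void srcu_init_like(void)	/* invoked only after timer init */
{
	timers_ready = true;
	for (int i = 0; i < nr_early; i++)
		queue_on_timer_core(early_queue[i]);
	nr_early = 0;
}

int main(void)
{
	call_srcu_like(1);		/* before timers: safely parked */
	srcu_init_like();		/* timers ready: backlog is flushed */
	call_srcu_like(2);		/* after init: queued directly */
	return 0;
}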
From ce7c169dee28866539abb0e603b9a23055d30fdc Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 30 Mar 2021 13:23:49 -0700
Subject: rcu: Remove the unused rcu_irq_exit_preempt() function

Commit 9ee01e0f69a9 ("x86/entry: Clean up idtentry_enter/exit()
leftovers") left the rcu_irq_exit_preempt() in place in order to avoid
conflicts with the -rcu tree. Now that this change has long since hit
mainline, this commit removes the no-longer-used rcu_irq_exit_preempt()
function.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcutiny.h | 1 -
 include/linux/rcutree.h | 1 -
 2 files changed, 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 35e0be326ffc..953e70fafe38 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index b89b54130f49..53209d669400 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -49,7 +49,6 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);
-- cgit v1.2.3

From 3066820034b5dd4e89bd74a7739c51c2d6f5e554 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 5 Apr 2021 09:51:05 -0700
Subject: rcu: Reject RCU_LOCKDEP_WARN() false positives

If another lockdep report runs concurrently with an RCU lockdep report
from RCU_LOCKDEP_WARN(), the following sequence of events can occur:

1.	debug_lockdep_rcu_enabled() sees that lockdep is enabled when
	called from (say) synchronize_rcu().

2.	Lockdep is disabled by a concurrent lockdep report.

3.	debug_lockdep_rcu_enabled() evaluates its lockdep-expression
	argument, for example, lock_is_held(&rcu_bh_lock_map).

4.	Because lockdep is now disabled, lock_is_held() plays it safe and
	returns the constant 1.

5.	But in this case, the constant 1 is not safe, because invoking
	synchronize_rcu() under rcu_read_lock_bh() is disallowed.

6.	debug_lockdep_rcu_enabled() wrongly invokes lockdep_rcu_suspicious(),
	resulting in a false-positive splat.

This commit therefore changes RCU_LOCKDEP_WARN() to check
debug_lockdep_rcu_enabled() after checking the lockdep expression, so
that any "safe" returns from lock_is_held() are rejected by
debug_lockdep_rcu_enabled(). This requires memory ordering, which is
supplied by READ_ONCE(debug_locks). The resulting volatile accesses
prevent the compiler from reordering and the fact that only one variable
is being accessed prevents the underlying hardware from reordering.
The combination works for IA64, which can reorder reads to the same
location, but this is defeated by the volatile accesses, which compile
to load instructions that provide ordering.

Reported-by: syzbot+dde0cc33951735441301@syzkaller.appspotmail.com
Reported-by: Matthew Wilcox
Reported-by: syzbot+88e4f02896967fe1ab0d@syzkaller.appspotmail.com
Reported-by: Thomas Gleixner
Suggested-by: Boqun Feng
Reviewed-by: Boqun Feng
Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9455476c5ba2..1199ffd305d1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
 #define RCU_LOCKDEP_WARN(c, s) \
 	do { \
 		static bool __section(".data.unlikely") __warned; \
-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+		if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
 			__warned = true; \
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
 		} \
-- cgit v1.2.3
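The ordering matters because lock_is_held() degrades to a constant 1 once
lockdep has shut itself down. The single-threaded C sketch below models
only that interaction; the function names mirror the kernel's but the
bodies are invented, and the "concurrent" disable is simulated by
clearing the flag between the two checks.

/* Simplified model of the check ordering -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool debug_locks = true;		/* cleared by a concurrent lockdep report */

static bool debug_lockdep_rcu_enabled(void)
{
	return debug_locks;
}

static bool lock_is_held(void)
{
	if (!debug_locks)
		return true;		/* the "safe" constant 1 once lockdep is off */
	return false;			/* in this model, the lock is NOT held */
}

static void check_old_order(void)	/* enabled-check first, condition last */
{
	bool enabled = debug_lockdep_rcu_enabled();

	debug_locks = false;		/* concurrent report disables lockdep here */
	if (enabled && lock_is_held())
		printf("old order: false-positive splat\n");
}

static void check_new_order(void)	/* condition first, enabled-check last */
{
	if (lock_is_held() && debug_lockdep_rcu_enabled())
		printf("new order: splat\n");
	else
		printf("new order: bogus 'held' result rejected\n");
}

int main(void)
{
	check_old_order();		/* prints the false-positive case */
	check_new_order();		/* the safe constant 1 is filtered out */
	return 0;
}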
McKenney" Date: Thu, 29 Apr 2021 11:18:01 -0700 Subject: rcu: Improve comments describing RCU read-side critical sections There are a number of places that call out the fact that preempt-disable regions of code now act as RCU read-side critical sections, where preempt-disable regions of code include irq-disable regions of code, bh-disable regions of code, hardirq handlers, and NMI handlers. However, someone relying solely on (for example) the call_rcu() header comment might well have no idea that preempt-disable regions of code have RCU semantics. This commit therefore updates the header comments for call_rcu(), synchronize_rcu(), rcu_dereference_bh_check(), and rcu_dereference_sched_check() to call out these new(ish) forms of RCU readers. Reported-by: Michel Lespinasse [ paulmck: Apply Matthew Wilcox and Michel Lespinasse feedback. ] Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b071d02a028a..f0eecb9e49c8 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -532,7 +532,12 @@ do { \ * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * - * This is the RCU-bh counterpart to rcu_dereference_check(). + * This is the RCU-bh counterpart to rcu_dereference_check(). However, + * please note that starting in v5.0 kernels, vanilla RCU grace periods + * wait for local_bh_disable() regions of code in addition to regions of + * code demarked by rcu_read_lock() and rcu_read_unlock(). This means + * that synchronize_rcu(), call_rcu, and friends all take not only + * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) @@ -543,6 +548,11 @@ do { \ * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). + * However, please note that starting in v5.0 kernels, vanilla RCU grace + * periods wait for preempt_disable() regions of code in addition to + * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). + * This means that synchronize_rcu(), call_rcu, and friends all take not + * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ @@ -634,6 +644,12 @@ do { \ * sections, invocation of the corresponding RCU callback is deferred * until after the all the other CPUs exit their critical sections. * + * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also + * wait for regions of code with preemption disabled, including regions of + * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which + * define synchronize_sched(), only code enclosed within rcu_read_lock() + * and rcu_read_unlock() are guaranteed to be waited for. + * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections. One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU @@ -728,9 +744,11 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent of rcu_read_lock(), but also disables softirqs. 
From 1893afd63409111c6edcee9d6e1196fc06cf4fd7 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 29 Apr 2021 11:18:01 -0700
Subject: rcu: Improve comments describing RCU read-side critical sections

There are a number of places that call out the fact that preempt-disable
regions of code now act as RCU read-side critical sections, where
preempt-disable regions of code include irq-disable regions of code,
bh-disable regions of code, hardirq handlers, and NMI handlers. However,
someone relying solely on (for example) the call_rcu() header comment
might well have no idea that preempt-disable regions of code have RCU
semantics. This commit therefore updates the header comments for
call_rcu(), synchronize_rcu(), rcu_dereference_bh_check(), and
rcu_dereference_sched_check() to call out these new(ish) forms of RCU
readers.

Reported-by: Michel Lespinasse
[ paulmck: Apply Matthew Wilcox and Michel Lespinasse feedback. ]
Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b071d02a028a..f0eecb9e49c8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -532,7 +532,12 @@ do { \
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu, and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
  */
 #define rcu_dereference_bh_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
@@ -543,6 +548,11 @@ do { \
  * @c: The conditions under which the dereference will take place
  *
  * This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu, and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
  */
 #define rcu_dereference_sched_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
@@ -634,6 +644,12 @@ do { \
  * sections, invocation of the corresponding RCU callback is deferred
 * until after the all the other CPUs exit their critical sections.
 *
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() are guaranteed to be waited for.
+ *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -728,9 +744,11 @@ static inline void rcu_read_unlock(void)
 /**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
@@ -763,9 +781,12 @@ static inline void rcu_read_unlock_bh(void)
 /**
 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
 *
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
-- cgit v1.2.3
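A hedged sketch of what the updated comments describe: in v5.0 and later,
a softirq-disabled region is itself an RCU reader that synchronize_rcu()
waits for. The code below is hypothetical (bar_entry, bar_ptr, and both
functions are invented for illustration), and it assumes the updater is
otherwise serialized by its caller.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar_entry {
	int val;
};

static struct bar_entry __rcu *bar_ptr;

static int bar_read(void)
{
	struct bar_entry *e;
	int val = -1;

	local_bh_disable();		/* acts as an RCU reader in v5.0+ */
	e = rcu_dereference_bh(bar_ptr);
	if (e)
		val = e->val;
	local_bh_enable();
	return val;
}

static void bar_publish(struct bar_entry *new_e)
{
	struct bar_entry *old_e;

	old_e = rcu_dereference_protected(bar_ptr, 1);	/* updater serialized by caller */
	rcu_assign_pointer(bar_ptr, new_e);
	synchronize_rcu();		/* waits for the bh-disabled reader above too */
	kfree(old_e);
}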
From 0223846010750e28e4330f1beefb5564ba406ef7 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 29 Apr 2021 11:30:49 -0700
Subject: rcu: Remove obsolete rcu_read_unlock() deadlock commentary

The deferred quiescent states resulting from the consolidation of RCU-bh
and RCU-sched into RCU means that rcu_read_unlock() will no longer
attempt to acquire scheduler locks if interrupts were disabled across
that call to rcu_read_unlock(). The cautions in the rcu_read_unlock()
header comment are therefore obsolete. This commit therefore removes
them.

Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 33 ++++++---------------------------
 1 file changed, 6 insertions(+), 27 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f0eecb9e49c8..d9680b798b21 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -702,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
 /**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
 *
 * See rcu_read_lock() for more information.
 */
-- cgit v1.2.3