43 files changed, 1129 insertions, 453 deletions
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index a648b423ba0e..11cdab037bff 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -21,7 +21,7 @@
 Any code that happens after the end of a given RCU grace period is guaranteed
 to see the effects of all accesses prior to the beginning of that grace
 period that are within RCU read-side critical sections.
 Similarly, any code that happens before the beginning of a given RCU grace
-period is guaranteed to see the effects of all accesses following the end
+period is guaranteed to not see the effects of all accesses following the end
 of that grace period that are within RCU read-side critical sections.
 
 Note well that RCU-sched read-side critical sections include any region
@@ -339,14 +339,14 @@
 The diagram below shows the path of ordering if the leftmost
 ``rcu_node`` structure offlines its last CPU and if the next
 ``rcu_node`` structure has no online CPUs).
 
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-2.svg
 
 The final ``rcu_gp_init()`` pass through the ``rcu_node`` tree traverses
 breadth-first, setting each ``rcu_node`` structure's ``->gp_seq`` field
 to the newly advanced value from the ``rcu_state`` structure, as shown
 in the following diagram.
 
-.. kernel-figure:: TreeRCU-gp-init-1.svg
+.. kernel-figure:: TreeRCU-gp-init-3.svg
 
 This change will also cause each CPU's next call to ``__note_gp_changes()``
 to notice that a new grace period has started,
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 3996b54158bf..01ba293a2d70 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -76,6 +76,11 @@
 to change, such as less cores in the CPU list, then N and any ranges using
 N will also change.  Use the same on a small 4 core system, and "16-N"
 becomes "16-3" and now the same boot input will be flagged as invalid
 (start > end).
 
+The special case-tolerant group name "all" selects all CPUs, so that
+"nohz_full=all" is equivalent to "nohz_full=0-N".
+
+The semantics of "N" and "all" are supported at the bitmap level, and so
+hold for all users of bitmap_parse().
+
 This document may not be entirely up to date and comprehensive. The command
 "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cb89dbdedc46..4405fd32e8ab 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4290,6 +4290,11 @@
			whole algorithm to behave better in low memory condition.
 
+	rcutree.rcu_delay_page_cache_fill_msec= [KNL]
+			Set the page-cache refill delay (in milliseconds)
+			in response to low-memory conditions.  The range
+			of permitted values is 0:100000.
+
 	rcutree.jiffies_till_first_fqs= [KNL]
			Set delay from grace-period initialization to
			first attempt to force quiescent states.
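To make the new "all"/"N" semantics concrete, here is a minimal sketch of how the aliases resolve at the bitmap level. It assumes the list-format parser bitmap_parselist() and a hypothetical 8-CPU system; it is illustrative only and not part of the patch:

	#include <linux/bitmap.h>

	static void bitmap_alias_example(void)
	{
		DECLARE_BITMAP(mask, 8);	/* pretend the system has 8 CPUs */
		int ret;

		/* "all" sets every bit: equivalent to "0-N", here "0-7". */
		ret = bitmap_parselist("all", mask, 8);

		/* "N" means the last valid bit, so "4-N" parses as "4-7". */
		ret = bitmap_parselist("4-N", mask, 8);

		/* "16-N" becomes "16-7" on this map: start > end, so -EINVAL. */
		ret = bitmap_parselist("16-N", mask, 8);
		(void)ret;
	}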
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..e6011a9975af 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /**
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..cb1f4351e8ba 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -82,9 +82,7 @@ struct srcu_struct {
						/* callback for the barrier */
						/* operation. */
 	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 /* Values for state variable (bottom bits of ->srcu_gp_seq). */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4118a97e62fb..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
 
 #define del_singleshot_timer_sync(t) del_timer_sync(t)
 
-extern bool timer_curr_running(struct timer_list *timer);
-
 extern void init_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 6768b64bc738..670e41783edd 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
  *	"WakeNot": Don't wake rcuo kthread.
  *	"WakeNotPoll": Don't wake rcuo kthread because it is polling.
  *	"WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ *	"WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
  *	"WokeEmpty": rcuo CB kthread woke to find empty list.
  */
 TRACE_EVENT_RCU(rcu_nocb_wake,
diff --git a/init/main.c b/init/main.c
index eb01e121d2f1..7b6f49c4d388 100644
--- a/init/main.c
+++ b/init/main.c
@@ -42,6 +42,7 @@
 #include <linux/profile.h>
 #include <linux/kfence.h>
 #include <linux/rcupdate.h>
+#include <linux/srcu.h>
 #include <linux/moduleparam.h>
 #include <linux/kallsyms.h>
 #include <linux/writeback.h>
@@ -979,6 +980,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	tick_init();
 	rcu_init_nohz();
 	init_timers();
+	srcu_init();
 	hrtimers_init();
 	softirq_init();
 	timekeeping_init();
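The srcu.h hunk above is what lets start_kernel() call srcu_init() unconditionally: the real declaration is paired with a static-inline no-op when CONFIG_SRCU is off. A generic sketch of that compiled-out-stub idiom, using hypothetical CONFIG_FOO names:

	#ifdef CONFIG_FOO
	void foo_init(void);			/* real initializer, defined in foo.c */
	#else /* #ifdef CONFIG_FOO */
	static inline void foo_init(void) { }	/* compiles away entirely */
	#endif /* #else #ifdef CONFIG_FOO */

	/* Callers then need no #ifdef of their own: */
	void boot_sequence(void)
	{
		foo_init();	/* no-op unless CONFIG_FOO=y */
	}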
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index bf0827d4b659..24b5f2c2de87 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -308,6 +308,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 	}
 }
 
+extern void rcu_init_geometry(void);
+
 /* Returns a pointer to the first leaf rcu_node structure. */
 #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
 
@@ -422,12 +424,6 @@ do { \
 
 #endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */
 
-#ifdef CONFIG_SRCU
-void srcu_init(void);
-#else /* #ifdef CONFIG_SRCU */
-static inline void srcu_init(void) { }
-#endif /* #else #ifdef CONFIG_SRCU */
-
 #ifdef CONFIG_TINY_RCU
 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
 static inline bool rcu_gp_is_normal(void) { return true; }
@@ -441,7 +437,11 @@ bool rcu_gp_is_expedited(void);	/* Internal RCU use. */
 void rcu_expedite_gp(void);
 void rcu_unexpedite_gp(void);
 void rcupdate_announce_bootup_oddness(void);
+#ifdef CONFIG_TASKS_RCU_GENERIC
 void show_rcu_tasks_gp_kthreads(void);
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+static inline void show_rcu_tasks_gp_kthreads(void) {}
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 void rcu_request_urgent_qs_task(struct task_struct *t);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
 
@@ -519,6 +519,7 @@ static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
 static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) { return 0; }
 static inline void rcu_force_quiescent_state(void) { }
+static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
 static inline void show_rcu_gp_kthreads(void) { }
 static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 static inline void rcu_fwd_progress_check(unsigned long j) { }
@@ -527,6 +528,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
 unsigned long rcu_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
+bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
 void show_rcu_gp_kthreads(void);
 int rcu_get_gp_kthreads_prio(void);
 void rcu_fwd_progress_check(unsigned long j);
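The rcutorture.c diff below threads a new optional readlock_held() member (and a check_boost_failed() member) through each flavor's rcu_torture_ops table, replacing a hard-coded disjunction of every flavor's lock-held test. A minimal sketch of the optional-callback pattern, with hypothetical names:

	struct flavor_ops {
		const char *name;
		int (*readlock_held)(void);	/* optional; NULL means "cannot check" */
	};

	static int my_readlock_held(void)
	{
		return 1;	/* a real flavor would consult lockdep here */
	}

	static const struct flavor_ops my_ops = {
		.name		= "my-flavor",
		.readlock_held	= my_readlock_held,
	};

	/* A NULL slot is treated as trivially satisfied, as rcutorture does. */
	static int readlock_held_or_unchecked(const struct flavor_ops *ops)
	{
		return !ops->readlock_held || ops->readlock_held();
	}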
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 29d2f4c647d3..ec69273898af 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -245,12 +245,6 @@ static const char *rcu_torture_writer_state_getname(void)
 	return rcu_torture_writer_state_names[i];
 }
 
-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_PREEMPT_RT)
-# define rcu_can_boost() 1
-#else
-# define rcu_can_boost() 0
-#endif
-
 #ifdef CONFIG_RCU_TRACE
 static u64 notrace rcu_trace_clock_local(void)
 {
@@ -331,6 +325,7 @@ struct rcu_torture_ops {
 	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
 	void (*readunlock)(int idx);
+	int (*readlock_held)(void);
 	unsigned long (*get_gp_seq)(void);
 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 	void (*deferred_free)(struct rcu_torture *p);
@@ -345,6 +340,7 @@ struct rcu_torture_ops {
 	void (*fqs)(void);
 	void (*stats)(void);
 	void (*gp_kthread_dbg)(void);
+	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
 	int (*stall_dur)(void);
 	int irq_capable;
 	int can_boost;
@@ -359,6 +355,11 @@ static struct rcu_torture_ops *cur_ops;
 /*
  * Definitions for rcu torture testing.
  */
 
+static int torture_readlock_not_held(void)
+{
+	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
+}
+
 static int rcu_torture_read_lock(void) __acquires(RCU)
 {
 	rcu_read_lock();
@@ -483,30 +484,32 @@ static void rcu_sync_torture_init(void)
 }
 
 static struct rcu_torture_ops rcu_ops = {
-	.ttype = RCU_FLAVOR,
-	.init = rcu_sync_torture_init,
-	.readlock = rcu_torture_read_lock,
-	.read_delay = rcu_read_delay,
-	.readunlock = rcu_torture_read_unlock,
-	.get_gp_seq = rcu_get_gp_seq,
-	.gp_diff = rcu_seq_diff,
-	.deferred_free = rcu_torture_deferred_free,
-	.sync = synchronize_rcu,
-	.exp_sync = synchronize_rcu_expedited,
-	.get_gp_state = get_state_synchronize_rcu,
-	.start_gp_poll = start_poll_synchronize_rcu,
-	.poll_gp_state = poll_state_synchronize_rcu,
-	.cond_sync = cond_synchronize_rcu,
-	.call = call_rcu,
-	.cb_barrier = rcu_barrier,
-	.fqs = rcu_force_quiescent_state,
-	.stats = NULL,
-	.gp_kthread_dbg = show_rcu_gp_kthreads,
-	.stall_dur = rcu_jiffies_till_stall_check,
-	.irq_capable = 1,
-	.can_boost = rcu_can_boost(),
-	.extendables = RCUTORTURE_MAX_EXTEND,
-	.name = "rcu"
+	.ttype			= RCU_FLAVOR,
+	.init			= rcu_sync_torture_init,
+	.readlock		= rcu_torture_read_lock,
+	.read_delay		= rcu_read_delay,
+	.readunlock		= rcu_torture_read_unlock,
+	.readlock_held		= torture_readlock_not_held,
+	.get_gp_seq		= rcu_get_gp_seq,
+	.gp_diff		= rcu_seq_diff,
+	.deferred_free		= rcu_torture_deferred_free,
+	.sync			= synchronize_rcu,
+	.exp_sync		= synchronize_rcu_expedited,
+	.get_gp_state		= get_state_synchronize_rcu,
+	.start_gp_poll		= start_poll_synchronize_rcu,
+	.poll_gp_state		= poll_state_synchronize_rcu,
+	.cond_sync		= cond_synchronize_rcu,
+	.call			= call_rcu,
+	.cb_barrier		= rcu_barrier,
+	.fqs			= rcu_force_quiescent_state,
+	.stats			= NULL,
+	.gp_kthread_dbg		= show_rcu_gp_kthreads,
+	.check_boost_failed	= rcu_check_boost_fail,
+	.stall_dur		= rcu_jiffies_till_stall_check,
+	.irq_capable		= 1,
+	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
+	.extendables		= RCUTORTURE_MAX_EXTEND,
+	.name			= "rcu"
 };
 
 /*
@@ -540,6 +543,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 	.readlock = rcu_torture_read_lock,
 	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
 	.readunlock = rcu_torture_read_unlock,
+	.readlock_held = torture_readlock_not_held,
 	.get_gp_seq = rcu_no_completed,
 	.deferred_free = rcu_busted_torture_deferred_free,
 	.sync = synchronize_rcu_busted,
@@ -589,6 +593,11 @@ static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
 	srcu_read_unlock(srcu_ctlp, idx);
 }
 
+static int torture_srcu_read_lock_held(void)
+{
+	return srcu_read_lock_held(srcu_ctlp);
+}
+
 static unsigned long srcu_torture_completed(void)
 {
 	return srcu_batches_completed(srcu_ctlp);
@@ -646,6 +655,7 @@ static struct rcu_torture_ops srcu_ops = {
 	.readlock = srcu_torture_read_lock,
 	.read_delay = srcu_read_delay,
 	.readunlock = srcu_torture_read_unlock,
+	.readlock_held = torture_srcu_read_lock_held,
 	.get_gp_seq = srcu_torture_completed,
 	.deferred_free = srcu_torture_deferred_free,
 	.sync = srcu_torture_synchronize,
@@ -681,6 +691,7 @@ static struct rcu_torture_ops srcud_ops = {
 	.readlock = srcu_torture_read_lock,
 	.read_delay = srcu_read_delay,
 	.readunlock = srcu_torture_read_unlock,
+	.readlock_held = torture_srcu_read_lock_held,
 	.get_gp_seq = srcu_torture_completed,
 	.deferred_free = srcu_torture_deferred_free,
 	.sync = srcu_torture_synchronize,
@@ -700,6 +711,7 @@ static struct rcu_torture_ops busted_srcud_ops = {
 	.readlock = srcu_torture_read_lock,
 	.read_delay = rcu_read_delay,
 	.readunlock = srcu_torture_read_unlock,
+	.readlock_held = torture_srcu_read_lock_held,
 	.get_gp_seq = srcu_torture_completed,
 	.deferred_free = srcu_torture_deferred_free,
 	.sync = srcu_torture_synchronize,
@@ -787,6 +799,7 @@ static struct rcu_torture_ops trivial_ops = {
 	.readlock = rcu_torture_read_lock_trivial,
 	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
 	.readunlock = rcu_torture_read_unlock_trivial,
+	.readlock_held = torture_readlock_not_held,
 	.get_gp_seq = rcu_no_completed,
 	.sync = synchronize_rcu_trivial,
 	.exp_sync = synchronize_rcu_trivial,
@@ -850,6 +863,7 @@ static struct rcu_torture_ops tasks_tracing_ops = {
 	.readlock = tasks_tracing_torture_read_lock,
 	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
 	.readunlock = tasks_tracing_torture_read_unlock,
+	.readlock_held = rcu_read_lock_trace_held,
 	.get_gp_seq = rcu_no_completed,
 	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
 	.sync = synchronize_rcu_tasks_trace,
@@ -871,32 +885,13 @@ static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
 	return cur_ops->gp_diff(new, old);
 }
 
-static bool __maybe_unused torturing_tasks(void)
-{
-	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
-}
-
 /*
  * RCU torture priority-boost testing.  Runs one real-time thread per
- * CPU for moderate bursts, repeatedly registering RCU callbacks and
- * spinning waiting for them to be invoked.  If a given callback takes
- * too long to be invoked, we assume that priority inversion has occurred.
+ * CPU for moderate bursts, repeatedly starting grace periods and waiting
+ * for them to complete.  If a given grace period takes too long, we assume
+ * that priority inversion has occurred.
  */
-struct rcu_boost_inflight {
-	struct rcu_head rcu;
-	int inflight;
-};
-
-static void rcu_torture_boost_cb(struct rcu_head *head)
-{
-	struct rcu_boost_inflight *rbip =
-		container_of(head, struct rcu_boost_inflight, rcu);
-
-	/* Ensure RCU-core accesses precede clearing ->inflight */
-	smp_store_release(&rbip->inflight, 0);
-}
-
 static int old_rt_runtime = -1;
 
 static void rcu_torture_disable_rt_throttle(void)
@@ -923,49 +918,68 @@ static void rcu_torture_enable_rt_throttle(void)
 	old_rt_runtime = -1;
 }
 
-static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
 {
+	int cpu;
 	static int dbg_done;
-
-	if (end - start > test_boost_duration * HZ - HZ / 2) {
+	unsigned long end = jiffies;
+	bool gp_done;
+	unsigned long j;
+	static unsigned long last_persist;
+	unsigned long lp;
+	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
+
+	if (end - *start > mininterval) {
+		// Recheck after checking time to avoid false positives.
+		smp_mb(); // Time check before grace-period check.
+		if (cur_ops->poll_gp_state(gp_state))
+			return false; // passed, though perhaps just barely
+		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
+			// At most one persisted message per boost test.
+			j = jiffies;
+			lp = READ_ONCE(last_persist);
+			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
+				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
+			return false; // passed on a technicality
+		}
 		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
 		n_rcu_torture_boost_failure++;
-		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg)
+		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
+			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
+				current->rt_priority, gp_state, end - *start);
 			cur_ops->gp_kthread_dbg();
+			// Recheck after print to flag grace period ending during splat.
+			gp_done = cur_ops->poll_gp_state(gp_state);
+			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
+				gp_done ? "ended already" : "still pending");
+		}
 
-		return true; /* failed */
+		return true; // failed
+	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
+		*start = jiffies;
 	}
 
-	return false; /* passed */
+	return false; // passed
 }
 
 static int rcu_torture_boost(void *arg)
 {
-	unsigned long call_rcu_time;
 	unsigned long endtime;
+	unsigned long gp_state;
+	unsigned long gp_state_time;
 	unsigned long oldstarttime;
-	struct rcu_boost_inflight rbi = { .inflight = 0 };
 
 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
 
 	/* Set real-time priority. */
 	sched_set_fifo_low(current);
 
-	init_rcu_head_on_stack(&rbi.rcu);
 	/* Each pass through the following loop does one boost-test cycle. */
 	do {
 		bool failed = false; // Test failed already in this test interval
-		bool firsttime = true;
+		bool gp_initiated = false;
 
-		/* Increment n_rcu_torture_boosts once per boost-test */
-		while (!kthread_should_stop()) {
-			if (mutex_trylock(&boost_mutex)) {
-				n_rcu_torture_boosts++;
-				mutex_unlock(&boost_mutex);
-				break;
-			}
-			schedule_timeout_uninterruptible(1);
-		}
 		if (kthread_should_stop())
 			goto checkwait;
 
@@ -979,33 +993,33 @@ static int rcu_torture_boost(void *arg)
 			goto checkwait;
 		}
 
-		/* Do one boost-test interval. */
+		// Do one boost-test interval.
 		endtime = oldstarttime + test_boost_duration * HZ;
 		while (time_before(jiffies, endtime)) {
-			/* If we don't have a callback in flight, post one. */
-			if (!smp_load_acquire(&rbi.inflight)) {
-				/* RCU core before ->inflight = 1. */
-				smp_store_release(&rbi.inflight, 1);
-				cur_ops->call(&rbi.rcu, rcu_torture_boost_cb);
-				/* Check if the boost test failed */
-				if (!firsttime && !failed)
-					failed = rcu_torture_boost_failed(call_rcu_time, jiffies);
-				call_rcu_time = jiffies;
-				firsttime = false;
+			// Has current GP gone too long?
+			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
+				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
+			// If we don't have a grace period in flight, start one.
+			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
+				gp_state = cur_ops->start_gp_poll();
+				gp_initiated = true;
+				gp_state_time = jiffies;
 			}
-			if (stutter_wait("rcu_torture_boost"))
+			if (stutter_wait("rcu_torture_boost")) {
 				sched_set_fifo_low(current);
+				// If the grace period already ended,
+				// we don't know when that happened, so
+				// start over.
+				if (cur_ops->poll_gp_state(gp_state))
+					gp_initiated = false;
+			}
 			if (torture_must_stop())
 				goto checkwait;
 		}
 
-		/*
-		 * If boost never happened, then inflight will always be 1, in
-		 * this case the boost check would never happen in the above
-		 * loop so do another one here.
-		 */
-		if (!firsttime && !failed && smp_load_acquire(&rbi.inflight))
-			rcu_torture_boost_failed(call_rcu_time, jiffies);
+		// In case the grace period extended beyond the end of the loop.
+		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
+			rcu_torture_boost_failed(gp_state, &gp_state_time);
 
 		/*
 		 * Set the start time of the next test interval.
@@ -1014,11 +1028,12 @@ static int rcu_torture_boost(void *arg)
 		 * interval.  Besides, we are running at RT priority,
 		 * so delays should be relatively rare.
 		 */
-		while (oldstarttime == boost_starttime &&
-		       !kthread_should_stop()) {
+		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
 			if (mutex_trylock(&boost_mutex)) {
-				boost_starttime = jiffies +
-						  test_boost_interval * HZ;
+				if (oldstarttime == boost_starttime) {
+					boost_starttime = jiffies + test_boost_interval * HZ;
+					n_rcu_torture_boosts++;
+				}
 				mutex_unlock(&boost_mutex);
 				break;
 			}
@@ -1030,15 +1045,11 @@ checkwait:	if (stutter_wait("rcu_torture_boost"))
 			sched_set_fifo_low(current);
 	} while (!torture_must_stop());
 
-	while (smp_load_acquire(&rbi.inflight))
-		schedule_timeout_uninterruptible(1); // rcu_barrier() deadlocks.
-
 	/* Clean up and exit. */
-	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
+	while (!kthread_should_stop()) {
 		torture_shutdown_absorb("rcu_torture_boost");
 		schedule_timeout_uninterruptible(1);
 	}
-	destroy_rcu_head_on_stack(&rbi.rcu);
 	torture_kthread_stopping("rcu_torture_boost");
 	return 0;
 }
@@ -1553,11 +1564,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 	started = cur_ops->get_gp_seq();
 	ts = rcu_trace_clock_local();
 	p = rcu_dereference_check(rcu_torture_current,
-				  rcu_read_lock_bh_held() ||
-				  rcu_read_lock_sched_held() ||
-				  srcu_read_lock_held(srcu_ctlp) ||
-				  rcu_read_lock_trace_held() ||
-				  torturing_tasks());
+				  !cur_ops->readlock_held || cur_ops->readlock_held());
 	if (p == NULL) {
 		/* Wait for rcu_torture_writer to get underway */
 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
@@ -1861,48 +1868,49 @@ rcu_torture_stats(void *arg)
 		torture_shutdown_absorb("rcu_torture_stats");
 	} while (!torture_must_stop());
 	torture_kthread_stopping("rcu_torture_stats");
-
-	{
-		struct rcu_head *rhp;
-		struct kmem_cache *kcp;
-		static int z;
-
-		kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
-		rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
-		pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
-		pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
-		mem_dump_obj(ZERO_SIZE_PTR);
-		pr_alert("mem_dump_obj(NULL):");
-		mem_dump_obj(NULL);
-		pr_alert("mem_dump_obj(%px):", &rhp);
-		mem_dump_obj(&rhp);
-		pr_alert("mem_dump_obj(%px):", rhp);
-		mem_dump_obj(rhp);
-		pr_alert("mem_dump_obj(%px):", &rhp->func);
-		mem_dump_obj(&rhp->func);
-		pr_alert("mem_dump_obj(%px):", &z);
-		mem_dump_obj(&z);
-		kmem_cache_free(kcp, rhp);
-		kmem_cache_destroy(kcp);
-		rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
-		pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
-		pr_alert("mem_dump_obj(kmalloc %px):", rhp);
-		mem_dump_obj(rhp);
-		pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
-		mem_dump_obj(&rhp->func);
-		kfree(rhp);
-		rhp = vmalloc(4096);
-		pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
-		pr_alert("mem_dump_obj(vmalloc %px):", rhp);
-		mem_dump_obj(rhp);
-		pr_alert("mem_dump_obj(vmalloc %px):", &rhp->fu
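The boost-test rework above replaces callback-in-flight timing with RCU's polled grace-period interfaces. A rough sketch of that polling pattern follows; it is illustrative only (rcutorture reaches these functions through its cur_ops table, and the helper names here are invented):

	#include <linux/rcupdate.h>
	#include <linux/jiffies.h>

	static unsigned long gp_cookie;		/* from start_poll_synchronize_rcu() */
	static unsigned long gp_start_jiffies;

	/* Start a grace period without blocking and note when it began. */
	static void gp_poll_begin(void)
	{
		gp_cookie = start_poll_synchronize_rcu();
		gp_start_jiffies = jiffies;
	}

	/* Returns true if that grace period has outlived the allowed window. */
	static bool gp_took_too_long(unsigned long timeout_jiffies)
	{
		if (poll_state_synchronize_rcu(gp_cookie))
			return false;	/* grace period ended in time */
		return time_after(jiffies, gp_start_jiffies + timeout_jiffies);
	}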
