// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
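
/*
 * For example, extendables = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT
 * (0x05) allows readers to be extended by bh-disabled and
 * preemption-disabled sections, but not by irq-disabled sections or by
 * the rcu_read_lock_bh()/rcu_read_lock_sched() variants.
 */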
76
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10
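
/*
 * Each rcu_torture structure removed from rcu_torture_current is aged
 * through a pipeline, advancing one stage per grace period (see
 * rcu_torture_pipe_update_one()) and being freed only after
 * RCU_TORTURE_PIPE_LEN grace periods.  A reader that still sees a
 * structure in a late pipeline stage has witnessed a too-short grace
 * period.
 */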

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;
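
// Roughly: each reader publishes its loop count in rtc_myloops, is
// periodically assigned (via rtc_assigner) to check another reader's
// count, and a count that failed to advance across a grace period is
// flagged via n_rcu_torture_mbchk_fail (see rcu_torture_reader_do_mbchk()).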

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
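
// This array is indexed by the RTWS_* values above, so the two lists
// must be kept in sync (see rcu_torture_writer_state_getname()).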

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_gp_completed)(void);
	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};
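
/*
 * Not every flavor supplies every hook: optional members are left NULL
 * and are checked before use, as in the cur_ops->gp_diff check in
 * rcutorture_seq_diff() and the per-primitive checks in
 * rcu_torture_write_types().
 */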

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

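	/*
	 * The modulus below makes the long delay rare: roughly one call
	 * in nrealreaders * 2000 * longdelay_ms takes the mdelay() path,
	 * so the expected extra delay per call stays small even with
	 * many readers.
	 */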
	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

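/*
 * Vanilla RCU's polled cookies may need up to two calls to sync() before
 * poll_gp_state() is guaranteed to return true, a consequence of the
 * single-CPU grace-period optimizations (see the comment preceding
 * do_rtws_sync()).
 */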
static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.get_gp_completed = get_completed_synchronize_rcu,
	.get_gp_completed_full = get_completed_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;
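
// The "srcu" flavor uses the statically allocated srcu_ctl above, while
// "srcud" repoints srcu_ctlp at the dynamically initialized srcu_ctld
// (see srcu_torture_init() and srcu_torture_cleanup()).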

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

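/*
 * Forcing the current task onto each online CPU in turn induces a
 * context switch on every CPU.  With preemption disabled across readers
 * (CONFIG_PREEMPT=n plus the preempt_disable()-based readlock below),
 * a context switch on each CPU implies a full grace period.
 */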
static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay, /* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.cbflood_max = 50000,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. This is only possible if rcutorture is built in;
	 * otherwise, the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;
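// Writers and fakewriters then pick uniformly at random:
// synctype[torture_random(&rand) % nsynctypes].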

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_gp_completed) {
					cookie = cur_ops->get_gp_completed();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_gp_completed_full) {
					cur_ops->get_gp_completed_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
				cur_ops->cond_sync_exp(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
				cur_ops->cond_sync_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				for (i = 0; i < ARRAY_SIZE(ulo); i++)
					ulo[i] = cur_ops->get_comp_state();
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap)) {
					gp_snap1 = cur_ops->get_gp_state();
					for (i = 0; i < ARRAY_SIZE(ulo); i++)
						if (cur_ops->poll_gp_state(ulo[i]) ||
						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
							ulo[i] = gp_snap1;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
				for (i = 0; i < ARRAY_SIZE(rgo); i++)
					cur_ops->get_comp_state_full(&rgo[i]);
				cur_ops->start_gp_poll_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					cur_ops->get_gp_state_full(&gp_snap1_full);
					for (i = 0; i < ARRAY_SIZE(rgo); i++)
						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
						    cur_ops->same_gp_state_full(&rgo[i],
										&gp_snap1_full)) {
							rgo[i] = gp_snap1_full;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended &&
		    time_after(jiffies, stallsdone))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
					tracing_off();
					show_rcu_gp_kthreads();
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
					rcu_ftrace_dump(DUMP_ALL);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	struct rcu_gp_oldstate gp_snap_full;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
1628 /*
1629 * No update-side primitives, so don't try updating.
1630 * The resulting test won't be testing much, hence the
1631 * above WARN_ONCE().
1632 */
1633 torture_kthread_stopping("rcu_torture_fakewriter");
1634 return 0;
1635 }
1636
1637 do {
1638 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1639 if (cur_ops->cb_barrier != NULL &&
1640 torture_random(&rand) % (nfakewriters * 8) == 0) {
1641 cur_ops->cb_barrier();
1642 } else {
1643 switch (synctype[torture_random(&rand) % nsynctypes]) {
1644 case RTWS_DEF_FREE:
1645 break;
1646 case RTWS_EXP_SYNC:
1647 cur_ops->exp_sync();
1648 break;
1649 case RTWS_COND_GET:
1650 gp_snap = cur_ops->get_gp_state();
1651 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1652 cur_ops->cond_sync(gp_snap);
1653 break;
1654 case RTWS_COND_GET_EXP:
1655 gp_snap = cur_ops->get_gp_state_exp();
1656 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1657 cur_ops->cond_sync_exp(gp_snap);
1658 break;
1659 case RTWS_COND_GET_FULL:
1660 cur_ops->get_gp_state_full(&gp_snap_full);
1661 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1662 cur_ops->cond_sync_full(&gp_snap_full);
1663 break;
1664 case RTWS_COND_GET_EXP_FULL:
1665 cur_ops->get_gp_state_full(&gp_snap_full);
1666 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1667 cur_ops->cond_sync_exp_full(&gp_snap_full);
1668 break;
1669 case RTWS_POLL_GET:
1670 gp_snap = cur_ops->start_gp_poll();
1671 while (!cur_ops->poll_gp_state(gp_snap)) {
1672 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1673 &rand);
1674 }
1675 break;
1676 case RTWS_POLL_GET_FULL:
1677 cur_ops->start_gp_poll_full(&gp_snap_full);
1678 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1679 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1680 &rand);
1681 }
1682 break;
1683 case RTWS_POLL_GET_EXP:
1684 gp_snap = cur_ops->start_gp_poll_exp();
1685 while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1686 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1687 &rand);
1688 }
1689 break;
1690 case RTWS_POLL_GET_EXP_FULL:
1691 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1692 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1693 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1694 &rand);
1695 }
1696 break;
1697 case RTWS_SYNC:
1698 cur_ops->sync();
1699 break;
1700 default:
1701 WARN_ON_ONCE(1);
1702 break;
1703 }
1704 }
1705 stutter_wait("rcu_torture_fakewriter");
1706 } while (!torture_must_stop());
1707
1708 torture_kthread_stopping("rcu_torture_fakewriter");
1709 return 0;
1710}
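
/*
 * Illustrative sketch (not part of the test): the canonical use of a
 * polled grace-period API, which the writer and fakewriter kthreads
 * above drive through cur_ops.  The helper below is hypothetical, but
 * start_poll_synchronize_rcu() and poll_state_synchronize_rcu() are
 * vanilla RCU's real polled primitives.
 */
static void __maybe_unused rcu_polled_gp_sketch(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu();	/* Start a GP, snapshot state. */
	while (!poll_state_synchronize_rcu(cookie))
		schedule_timeout_uninterruptible(1);	/* GP not yet complete. */
	/* A full grace period has elapsed since the cookie was snapshotted. */
}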
1711
1712static void rcu_torture_timer_cb(struct rcu_head *rhp)
1713{
1714 kfree(rhp);
1715}
1716
1717// Set up and carry out testing of RCU's global memory ordering
1718static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1719 struct torture_random_state *trsp)
1720{
1721 unsigned long loops;
1722 int noc = torture_num_online_cpus();
1723 int rdrchked;
1724 int rdrchker;
1725 struct rcu_torture_reader_check *rtrcp; // Me.
1726 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1727 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1728 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1729
1730 if (myid < 0)
1731 return; // Don't try this from timer handlers.
1732
1733 // Increment my counter.
1734 rtrcp = &rcu_torture_reader_mbchk[myid];
1735 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1736
1737 // Attempt to assign someone else some checking work.
1738 rdrchked = torture_random(trsp) % nrealreaders;
1739 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1740 rdrchker = torture_random(trsp) % nrealreaders;
1741 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1742 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1743 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1744 !READ_ONCE(rtp->rtort_chkp) &&
1745 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1746 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1747 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1748 rtrcp->rtc_chkrdr = rdrchked;
1749 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1750 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1751 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1752 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1753 }
1754
1755 // If assigned some completed work, do it!
1756 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1757 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1758 return; // No work or work not yet ready.
1759 rdrchked = rtrcp_assigner->rtc_chkrdr;
1760 if (WARN_ON_ONCE(rdrchked < 0))
1761 return;
1762 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1763 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1764 atomic_inc(&n_rcu_torture_mbchk_tries);
1765 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1766 atomic_inc(&n_rcu_torture_mbchk_fail);
1767 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1768 rtrcp_assigner->rtc_ready = 0;
1769 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1770 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1771}
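
/*
 * Illustrative sketch (not part of the test): the failure check above
 * uses ULONG_CMP_LT() rather than a raw "<" so that wraparound of the
 * loop counters does not yield false positives.  Hypothetical helper
 * demonstrating the idiom.
 */
static bool __maybe_unused loops_behind_sketch(unsigned long seen, unsigned long assigned)
{
	/* True if "seen" is logically before "assigned", even across wrap. */
	return ULONG_CMP_LT(seen, assigned);
}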
1772
1773/*
1774 * Do one extension of an RCU read-side critical section using the
1775 * current reader state in readstate (set to zero for initial entry
1776 * to extended critical section), set the new state as specified by
1777 * newstate (set to zero for final exit from extended critical section),
1778 * and random-number-generator state in trsp. If this is neither the
1779 * beginning nor the end of the critical section and if there was actually a
1780 * change, do a ->read_delay().
1781 */
1782static void rcutorture_one_extend(int *readstate, int newstate,
1783 struct torture_random_state *trsp,
1784 struct rt_read_seg *rtrsp)
1785{
1786 unsigned long flags;
1787 int idxnew1 = -1;
1788 int idxnew2 = -1;
1789 int idxold1 = *readstate;
1790 int idxold2 = idxold1;
1791 int statesnew = ~*readstate & newstate;
1792 int statesold = *readstate & ~newstate;
1793
1794 WARN_ON_ONCE(idxold2 < 0);
1795 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1796 rtrsp->rt_readstate = newstate;
1797
1798 /* First, put new protection in place to avoid critical-section gap. */
1799 if (statesnew & RCUTORTURE_RDR_BH)
1800 local_bh_disable();
1801 if (statesnew & RCUTORTURE_RDR_RBH)
1802 rcu_read_lock_bh();
1803 if (statesnew & RCUTORTURE_RDR_IRQ)
1804 local_irq_disable();
1805 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1806 preempt_disable();
1807 if (statesnew & RCUTORTURE_RDR_SCHED)
1808 rcu_read_lock_sched();
1809 if (statesnew & RCUTORTURE_RDR_RCU_1)
1810 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1811 if (statesnew & RCUTORTURE_RDR_RCU_2)
1812 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1813
1814 /*
1815 * Next, remove old protection, in decreasing order of strength
1816 * to avoid unlock paths that aren't safe in the stronger
1817 * context. Namely: BH cannot be enabled with interrupts disabled.
1818 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
1819 * context.
1820 */
1821 if (statesold & RCUTORTURE_RDR_IRQ)
1822 local_irq_enable();
1823 if (statesold & RCUTORTURE_RDR_PREEMPT)
1824 preempt_enable();
1825 if (statesold & RCUTORTURE_RDR_SCHED)
1826 rcu_read_unlock_sched();
1827 if (statesold & RCUTORTURE_RDR_BH)
1828 local_bh_enable();
1829 if (statesold & RCUTORTURE_RDR_RBH)
1830 rcu_read_unlock_bh();
1831 if (statesold & RCUTORTURE_RDR_RCU_2) {
1832 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1833 WARN_ON_ONCE(idxnew2 != -1);
1834 idxold2 = 0;
1835 }
1836 if (statesold & RCUTORTURE_RDR_RCU_1) {
1837 bool lockit;
1838
1839 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1840 if (lockit)
1841 raw_spin_lock_irqsave(&current->pi_lock, flags);
1842 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1843 WARN_ON_ONCE(idxnew1 != -1);
1844 idxold1 = 0;
1845 if (lockit)
1846 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1847 }
1848
1849 /* Delay if neither beginning nor end and there was a change. */
1850 if ((statesnew || statesold) && *readstate && newstate)
1851 cur_ops->read_delay(trsp, rtrsp);
1852
1853 /* Update the reader state. */
1854 if (idxnew1 == -1)
1855 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1856 WARN_ON_ONCE(idxnew1 < 0);
1857 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1858 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1859 if (idxnew2 == -1)
1860 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1861 WARN_ON_ONCE(idxnew2 < 0);
1862 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1863 *readstate = idxnew1 | idxnew2 | newstate;
1864 WARN_ON_ONCE(*readstate < 0);
1865 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1866 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1867}
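
/*
 * Illustrative sketch (not part of the test): the bit arithmetic at the
 * top of rcutorture_one_extend().  Bits in newstate but not *readstate
 * name protections to acquire; bits in *readstate but not newstate name
 * protections to drop.  Hypothetical helper.
 */
static void __maybe_unused extend_transition_sketch(int readstate, int newstate)
{
	int statesnew = ~readstate & newstate;	/* Protections to acquire. */
	int statesold = readstate & ~newstate;	/* Protections to release. */

	/* E.g., readstate == RCUTORTURE_RDR_BH and newstate == RCUTORTURE_RDR_IRQ */
	/* gives statesnew == RCUTORTURE_RDR_IRQ, statesold == RCUTORTURE_RDR_BH. */
	WARN_ON_ONCE(statesnew & statesold);	/* Disjoint by construction. */
}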
1868
1869/* Return the biggest extendables mask given current RCU and boot parameters. */
1870static int rcutorture_extend_mask_max(void)
1871{
1872 int mask;
1873
1874 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1875 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1876 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1877 return mask;
1878}
1879
1880/* Return a random protection state mask, but with at least one bit set. */
1881static int
1882rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1883{
1884 int mask = rcutorture_extend_mask_max();
1885 unsigned long randmask1 = torture_random(trsp);
1886 unsigned long randmask2 = randmask1 >> 3;
1887 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1888 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1889 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1890
1891 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1892 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
1893 if (!(randmask1 & 0x7))
1894 mask = mask & randmask2;
1895 else
1896 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1897
1898 // Can't have nested RCU reader without outer RCU reader.
1899 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1900 if (oldmask & RCUTORTURE_RDR_RCU_1)
1901 mask &= ~RCUTORTURE_RDR_RCU_2;
1902 else
1903 mask |= RCUTORTURE_RDR_RCU_1;
1904 }
1905
1906 /*
1907 * Can't enable bh w/irq disabled.
1908 */
1909 if (mask & RCUTORTURE_RDR_IRQ)
1910 mask |= oldmask & bhs;
1911
1912 /*
1913 * Ideally these sequences would be detected in debug builds
1914 * (regardless of RT), but until then don't stop testing
1915 * them on non-RT.
1916 */
1917 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1918 /* Can't modify BH in atomic context */
1919 if (oldmask & preempts_irq)
1920 mask &= ~bhs;
1921 if ((oldmask | mask) & preempts_irq)
1922 mask |= oldmask & bhs;
1923 }
1924
1925 return mask ?: RCUTORTURE_RDR_RCU_1;
1926}
1927
1928/*
1929 * Do a randomly selected number of extensions of an existing RCU read-side
1930 * critical section.
1931 */
1932static struct rt_read_seg *
1933rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1934 struct rt_read_seg *rtrsp)
1935{
1936 int i;
1937 int j;
1938 int mask = rcutorture_extend_mask_max();
1939
1940 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1941 if (!((mask - 1) & mask))
1942 return rtrsp; /* Current RCU reader not extendable. */
1943 /* Bias towards larger numbers of loops. */
1944 i = torture_random(trsp);
1945 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1946 for (j = 0; j < i; j++) {
1947 mask = rcutorture_extend_mask(*readstate, trsp);
1948 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1949 }
1950 return &rtrsp[j];
1951}
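
/*
 * Illustrative sketch (not part of the test): the "(i | (i >> 3))"
 * step above ORs a shifted copy of the random bits into the value
 * before masking, biasing the result toward larger loop counts
 * compared to a plain modulus.  Hypothetical helper showing the range.
 */
static int __maybe_unused biased_loop_count_sketch(struct torture_random_state *trsp)
{
	int i = torture_random(trsp);

	/* Result always lies in [1, RCUTORTURE_RDR_MAX_LOOPS + 1]. */
	return ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
}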
1952
1953/*
1954 * Do one read-side critical section, returning false if there was
1955 * no data to read. Can be invoked both from process context and
1956 * from a timer handler.
1957 */
1958static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1959{
1960 bool checkpolling = !(torture_random(trsp) & 0xfff);
1961 unsigned long cookie;
1962 struct rcu_gp_oldstate cookie_full;
1963 int i;
1964 unsigned long started;
1965 unsigned long completed;
1966 int newstate;
1967 struct rcu_torture *p;
1968 int pipe_count;
1969 int readstate = 0;
1970 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1971 struct rt_read_seg *rtrsp = &rtseg[0];
1972 struct rt_read_seg *rtrsp1;
1973 unsigned long long ts;
1974
1975 WARN_ON_ONCE(!rcu_is_watching());
1976 newstate = rcutorture_extend_mask(readstate, trsp);
1977 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1978 if (checkpolling) {
1979 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1980 cookie = cur_ops->get_gp_state();
1981 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
1982 cur_ops->get_gp_state_full(&cookie_full);
1983 }
1984 started = cur_ops->get_gp_seq();
1985 ts = rcu_trace_clock_local();
1986 p = rcu_dereference_check(rcu_torture_current,
1987 !cur_ops->readlock_held || cur_ops->readlock_held());
1988 if (p == NULL) {
1989 /* Wait for rcu_torture_writer to get underway */
1990 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1991 return false;
1992 }
1993 if (p->rtort_mbtest == 0)
1994 atomic_inc(&n_rcu_torture_mberror);
1995 rcu_torture_reader_do_mbchk(myid, p, trsp);
1996 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1997 preempt_disable();
1998 pipe_count = READ_ONCE(p->rtort_pipe_count);
1999 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2000 /* Should not happen, but... */
2001 pipe_count = RCU_TORTURE_PIPE_LEN;
2002 }
2003 completed = cur_ops->get_gp_seq();
2004 if (pipe_count > 1) {
2005 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2006 ts, started, completed);
2007 rcu_ftrace_dump(DUMP_ALL);
2008 }
2009 __this_cpu_inc(rcu_torture_count[pipe_count]);
2010 completed = rcutorture_seq_diff(completed, started);
2011 if (completed > RCU_TORTURE_PIPE_LEN) {
2012 /* Should not happen, but... */
2013 completed = RCU_TORTURE_PIPE_LEN;
2014 }
2015 __this_cpu_inc(rcu_torture_batch[completed]);
2016 preempt_enable();
2017 if (checkpolling) {
2018 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2019 WARN_ONCE(cur_ops->poll_gp_state(cookie),
2020 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2021 __func__,
2022 rcu_torture_writer_state_getname(),
2023 rcu_torture_writer_state,
2024 cookie, cur_ops->get_gp_state());
2025 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2026 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2027 "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2028 __func__,
2029 rcu_torture_writer_state_getname(),
2030 rcu_torture_writer_state,
2031 cpumask_pr_args(cpu_online_mask));
2032 }
2033 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2034 WARN_ON_ONCE(readstate);
2035 // This next splat is expected behavior if leakpointer, especially
2036 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2037 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2038
2039 /* If error or close call, record the sequence of reader protections. */
2040 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2041 i = 0;
2042 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2043 err_segs[i++] = *rtrsp1;
2044 rt_read_nsegs = i;
2045 }
2046
2047 return true;
2048}
2049
2050static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2051
2052/*
2053 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
2054 * incrementing the corresponding element of the pipeline array. The
2055 * counter in the element should never be greater than 1; otherwise, the
2056 * RCU implementation is broken.
2057 */
2058static void rcu_torture_timer(struct timer_list *unused)
2059{
2060 atomic_long_inc(&n_rcu_torture_timers);
2061 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2062
2063 /* Test call_rcu() invocation from interrupt handler. */
2064 if (cur_ops->call) {
2065 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2066
2067 if (rhp)
2068 cur_ops->call(rhp, rcu_torture_timer_cb);
2069 }
2070}
2071
2072/*
2073 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
2074 * incrementing the corresponding element of the pipeline array. The
2075 * counter in the element should never be greater than 1; otherwise, the
2076 * RCU implementation is broken.
2077 */
2078static int
2079rcu_torture_reader(void *arg)
2080{
2081 unsigned long lastsleep = jiffies;
2082 long myid = (long)arg;
2083 int mynumonline = myid;
2084 DEFINE_TORTURE_RANDOM(rand);
2085 struct timer_list t;
2086
2087 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2088 set_user_nice(current, MAX_NICE);
2089 if (irqreader && cur_ops->irq_capable)
2090 timer_setup_on_stack(&t, rcu_torture_timer, 0);
2091 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2092 do {
2093 if (irqreader && cur_ops->irq_capable) {
2094 if (!timer_pending(&t))
2095 mod_timer(&t, jiffies + 1);
2096 }
2097 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2098 schedule_timeout_interruptible(HZ);
2099 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2100 torture_hrtimeout_us(500, 1000, &rand);
2101 lastsleep = jiffies + 10;
2102 }
2103 while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2104 schedule_timeout_interruptible(HZ / 5);
2105 stutter_wait("rcu_torture_reader");
2106 } while (!torture_must_stop());
2107 if (irqreader && cur_ops->irq_capable) {
2108 del_timer_sync(&t);
2109 destroy_timer_on_stack(&t);
2110 }
2111 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2112 torture_kthread_stopping("rcu_torture_reader");
2113 return 0;
2114}
2115
2116/*
2117 * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
2118 * increase race probabilities and fuzzes the interval between toggling.
2119 */
2120static int rcu_nocb_toggle(void *arg)
2121{
2122 int cpu;
2123 int maxcpu = -1;
2124 int oldnice = task_nice(current);
2125 long r;
2126 DEFINE_TORTURE_RANDOM(rand);
2127 ktime_t toggle_delay;
2128 unsigned long toggle_fuzz;
2129 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2130
2131 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2132 while (!rcu_inkernel_boot_has_ended())
2133 schedule_timeout_interruptible(HZ / 10);
2134 for_each_possible_cpu(cpu)
2135 maxcpu = cpu;
2136 WARN_ON(maxcpu < 0);
2137 if (toggle_interval > ULONG_MAX)
2138 toggle_fuzz = ULONG_MAX >> 3;
2139 else
2140 toggle_fuzz = toggle_interval >> 3;
2141 if (toggle_fuzz <= 0)
2142 toggle_fuzz = NSEC_PER_USEC;
2143 do {
2144 r = torture_random(&rand);
2145 cpu = (r >> 1) % (maxcpu + 1);
2146 if (r & 0x1) {
2147 rcu_nocb_cpu_offload(cpu);
2148 atomic_long_inc(&n_nocb_offload);
2149 } else {
2150 rcu_nocb_cpu_deoffload(cpu);
2151 atomic_long_inc(&n_nocb_deoffload);
2152 }
2153 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2154 set_current_state(TASK_INTERRUPTIBLE);
2155 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2156 if (stutter_wait("rcu_nocb_toggle"))
2157 sched_set_normal(current, oldnice);
2158 } while (!torture_must_stop());
2159 torture_kthread_stopping("rcu_nocb_toggle");
2160 return 0;
2161}
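
/*
 * Illustrative sketch (not part of the test): the delay computation
 * used above, which fuzzes the nominal toggle interval by up to 1/8th
 * of its value (and by at least one microsecond) so that toggling does
 * not lock into step with other periodic activity.  Hypothetical helper.
 */
static ktime_t __maybe_unused fuzzed_toggle_delay_sketch(struct torture_random_state *trsp,
							 ktime_t interval)
{
	unsigned long fuzz = min_t(u64, interval >> 3, ULONG_MAX >> 3);

	if (!fuzz)
		fuzz = NSEC_PER_USEC;
	return torture_random(trsp) % fuzz + interval;
}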
2162
2163/*
2164 * Print torture statistics. Caller must ensure that there is only
2165 * one call to this function at a given time!!! This is normally
2166 * accomplished by relying on the module system to only have one copy
2167 * of the module loaded, and then by giving the rcu_torture_stats
2168 * kthread full control (or the init/cleanup functions when rcu_torture_stats
2169 * thread is not running).
2170 */
2171static void
2172rcu_torture_stats_print(void)
2173{
2174 int cpu;
2175 int i;
2176 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2177 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2178 struct rcu_torture *rtcp;
2179 static unsigned long rtcv_snap = ULONG_MAX;
2180 static bool splatted;
2181 struct task_struct *wtp;
2182
2183 for_each_possible_cpu(cpu) {
2184 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2185 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2186 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2187 }
2188 }
2189 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2190 if (pipesummary[i] != 0)
2191 break;
2192 }
2193
2194 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2195 rtcp = rcu_access_pointer(rcu_torture_current);
2196 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2197 rtcp,
2198 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2199 rcu_torture_current_version,
2200 list_empty(&rcu_torture_freelist),
2201 atomic_read(&n_rcu_torture_alloc),
2202 atomic_read(&n_rcu_torture_alloc_fail),
2203 atomic_read(&n_rcu_torture_free));
2204 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2205 atomic_read(&n_rcu_torture_mberror),
2206 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2207 n_rcu_torture_barrier_error,
2208 n_rcu_torture_boost_ktrerror);
2209 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2210 n_rcu_torture_boost_failure,
2211 n_rcu_torture_boosts,
2212 atomic_long_read(&n_rcu_torture_timers));
2213 torture_onoff_stats();
2214 pr_cont("barrier: %ld/%ld:%ld ",
2215 data_race(n_barrier_successes),
2216 data_race(n_barrier_attempts),
2217 data_race(n_rcu_torture_barrier_error));
2218 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2219 pr_cont("nocb-toggles: %ld:%ld\n",
2220 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2221
2222 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2223 if (atomic_read(&n_rcu_torture_mberror) ||
2224 atomic_read(&n_rcu_torture_mbchk_fail) ||
2225 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2226 n_rcu_torture_boost_failure || i > 1) {
2227 pr_cont("%s", "!!! ");
2228 atomic_inc(&n_rcu_torture_error);
2229 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2230 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2231 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
2232 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2233 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2234 WARN_ON_ONCE(i > 1); // Too-short grace period
2235 }
2236 pr_cont("Reader Pipe: ");
2237 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2238 pr_cont(" %ld", pipesummary[i]);
2239 pr_cont("\n");
2240
2241 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2242 pr_cont("Reader Batch: ");
2243 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2244 pr_cont(" %ld", batchsummary[i]);
2245 pr_cont("\n");
2246
2247 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2248 pr_cont("Free-Block Circulation: ");
2249 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2250 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2251 }
2252 pr_cont("\n");
2253
2254 if (cur_ops->stats)
2255 cur_ops->stats();
2256 if (rtcv_snap == rcu_torture_current_version &&
2257 rcu_access_pointer(rcu_torture_current) &&
2258 !rcu_stall_is_suppressed()) {
2259 int __maybe_unused flags = 0;
2260 unsigned long __maybe_unused gp_seq = 0;
2261
2262 rcutorture_get_gp_data(cur_ops->ttype,
2263 &flags, &gp_seq);
2264 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
2265 &flags, &gp_seq);
2266 wtp = READ_ONCE(writer_task);
2267 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2268 rcu_torture_writer_state_getname(),
2269 rcu_torture_writer_state, gp_seq, flags,
2270 wtp == NULL ? ~0U : wtp->__state,
2271 wtp == NULL ? -1 : (int)task_cpu(wtp));
2272 if (!splatted && wtp) {
2273 sched_show_task(wtp);
2274 splatted = true;
2275 }
2276 if (cur_ops->gp_kthread_dbg)
2277 cur_ops->gp_kthread_dbg();
2278 rcu_ftrace_dump(DUMP_ALL);
2279 }
2280 rtcv_snap = rcu_torture_current_version;
2281}
2282
2283/*
2284 * Periodically prints torture statistics, if periodic statistics printing
2285 * was specified via the stat_interval module parameter.
2286 */
2287static int
2288rcu_torture_stats(void *arg)
2289{
2290 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2291 do {
2292 schedule_timeout_interruptible(stat_interval * HZ);
2293 rcu_torture_stats_print();
2294 torture_shutdown_absorb("rcu_torture_stats");
2295 } while (!torture_must_stop());
2296 torture_kthread_stopping("rcu_torture_stats");
2297 return 0;
2298}
2299
2300/* Test mem_dump_obj() and friends. */
2301static void rcu_torture_mem_dump_obj(void)
2302{
2303 struct rcu_head *rhp;
2304 struct kmem_cache *kcp;
2305 static int z;
2306
2307 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2308 if (WARN_ON_ONCE(!kcp))
2309 return;
2310 rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2311 if (WARN_ON_ONCE(!rhp)) {
2312 kmem_cache_destroy(kcp);
2313 return;
2314 }
2315 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2316 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2317 mem_dump_obj(ZERO_SIZE_PTR);
2318 pr_alert("mem_dump_obj(NULL):");
2319 mem_dump_obj(NULL);
2320 pr_alert("mem_dump_obj(%px):", &rhp);
2321 mem_dump_obj(&rhp);
2322 pr_alert("mem_dump_obj(%px):", rhp);
2323 mem_dump_obj(rhp);
2324 pr_alert("mem_dump_obj(%px):", &rhp->func);
2325 mem_dump_obj(&rhp->func);
2326 pr_alert("mem_dump_obj(%px):", &z);
2327 mem_dump_obj(&z);
2328 kmem_cache_free(kcp, rhp);
2329 kmem_cache_destroy(kcp);
2330 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2331 if (WARN_ON_ONCE(!rhp))
2332 return;
2333 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2334 pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2335 mem_dump_obj(rhp);
2336 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2337 mem_dump_obj(&rhp->func);
2338 kfree(rhp);
2339 rhp = vmalloc(4096);
2340 if (WARN_ON_ONCE(!rhp))
2341 return;
2342 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2343 pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2344 mem_dump_obj(rhp);
2345 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2346 mem_dump_obj(&rhp->func);
2347 vfree(rhp);
2348}
2349
2350static void
2351rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2352{
2353 pr_alert("%s" TORTURE_FLAG
2354 "--- %s: nreaders=%d nfakewriters=%d "
2355 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2356 "shuffle_interval=%d stutter=%d irqreader=%d "
2357 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2358 "test_boost=%d/%d test_boost_interval=%d "
2359 "test_boost_duration=%d shutdown_secs=%d "
2360 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2361 "stall_cpu_block=%d "
2362 "n_barrier_cbs=%d "
2363 "onoff_interval=%d onoff_holdoff=%d "
2364 "read_exit_delay=%d read_exit_burst=%d "
2365 "nocbs_nthreads=%d nocbs_toggle=%d "
2366 "test_nmis=%d\n",
2367 torture_type, tag, nrealreaders, nfakewriters,
2368 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2369 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2370 test_boost, cur_ops->can_boost,
2371 test_boost_interval, test_boost_duration, shutdown_secs,
2372 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2373 stall_cpu_block,
2374 n_barrier_cbs,
2375 onoff_interval, onoff_holdoff,
2376 read_exit_delay, read_exit_burst,
2377 nocbs_nthreads, nocbs_toggle,
2378 test_nmis);
2379}
2380
2381static int rcutorture_booster_cleanup(unsigned int cpu)
2382{
2383 struct task_struct *t;
2384
2385 if (boost_tasks[cpu] == NULL)
2386 return 0;
2387 mutex_lock(&boost_mutex);
2388 t = boost_tasks[cpu];
2389 boost_tasks[cpu] = NULL;
2390 rcu_torture_enable_rt_throttle();
2391 mutex_unlock(&boost_mutex);
2392
2393 /* This must be outside of the mutex, otherwise deadlock! */
2394 torture_stop_kthread(rcu_torture_boost, t);
2395 return 0;
2396}
2397
2398static int rcutorture_booster_init(unsigned int cpu)
2399{
2400 int retval;
2401
2402 if (boost_tasks[cpu] != NULL)
2403 return 0; /* Already created, nothing more to do. */
2404
2405 // Testing RCU priority boosting requires that rcutorture do
2406 // some serious abuse. Counter this by running ksoftirqd
2407 // at higher priority.
2408 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2409 struct sched_param sp;
2410 struct task_struct *t;
2411
2412 t = per_cpu(ksoftirqd, cpu);
2413 WARN_ON_ONCE(!t);
2414 sp.sched_priority = 2;
2415 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2416 }
2417
2418 /* Don't allow time recalculation while creating a new task. */
2419 mutex_lock(&boost_mutex);
2420 rcu_torture_disable_rt_throttle();
2421 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2422 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2423 cpu, "rcu_torture_boost_%u");
2424 if (IS_ERR(boost_tasks[cpu])) {
2425 retval = PTR_ERR(boost_tasks[cpu]);
2426 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2427 n_rcu_torture_boost_ktrerror++;
2428 boost_tasks[cpu] = NULL;
2429 mutex_unlock(&boost_mutex);
2430 return retval;
2431 }
2432 mutex_unlock(&boost_mutex);
2433 return 0;
2434}
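
/*
 * Illustrative sketch (not part of the test logic here): the two
 * functions above are shaped as CPU-hotplug callbacks.  Registration
 * would look roughly like the following, assuming the dynamic-state
 * hotplug API; error handling and state teardown are elided.
 */
static int __maybe_unused booster_hotplug_sketch(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
				rcutorture_booster_init, rcutorture_booster_cleanup);
	if (ret < 0)
		return ret;	/* Registration failed. */
	return 0;		/* Positive return is the allocated state number. */
}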
2435
2436static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
2437{
2438 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
2439 return NOTIFY_OK;
2440}
2441
2442static struct notifier_block rcu_torture_stall_block = {
2443 .notifier_call = rcu_torture_stall_nf,
2444};
2445
2446/*
2447 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2448 * induces a CPU stall for the time specified by stall_cpu. If a new
2449 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
2450 */
2451static int rcu_torture_stall(void *args)
2452{
2453 int idx;
2454 int ret;
2455 unsigned long stop_at;
2456
2457 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2458 if (rcu_cpu_stall_notifiers) {
2459 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
2460 if (ret)
2461 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
2462 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
2463 }
2464 if (stall_cpu_holdoff > 0) {
2465 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2466 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2467 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2468 }
2469 if (!kthread_should_stop() && stall_gp_kthread > 0) {
2470 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2471 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2472 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2473 if (kthread_should_stop())
2474 break;
2475 schedule_timeout_uninterruptible(HZ);
2476 }
2477 }
2478 if (!kthread_should_stop() && stall_cpu > 0) {
2479 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2480 stop_at = ktime_get_seconds() + stall_cpu;
2481 /* RCU CPU stall is expected behavior in the following code. */
2482 idx = cur_ops->readlock();
2483 if (stall_cpu_irqsoff)
2484 local_irq_disable();
2485 else if (!stall_cpu_block)
2486 preempt_disable();
2487 pr_alert("%s start on CPU %d.\n",
2488 __func__, raw_smp_processor_id());
2489 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2490 stop_at))
2491 if (stall_cpu_block) {
2492#ifdef CONFIG_PREEMPTION
2493 preempt_schedule();
2494#else
2495 schedule_timeout_uninterruptible(HZ);
2496#endif
2497 } else if (stall_no_softlockup) {
2498 touch_softlockup_watchdog();
2499 }
2500 if (stall_cpu_irqsoff)
2501 local_irq_enable();
2502 else if (!stall_cpu_block)
2503 preempt_enable();
2504 cur_ops->readunlock(idx);
2505 }
2506 pr_alert("%s end.\n", __func__);
2507 if (rcu_cpu_stall_notifiers && !ret) {
2508 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
2509 if (ret)
2510 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
2511 }
2512 torture_shutdown_absorb("rcu_torture_stall");
2513 while (!kthread_should_stop())
2514 schedule_timeout_interruptible(10 * HZ);
2515 return 0;
2516}
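
/*
 * Illustrative usage (an assumption, not taken from this file): a
 * CPU-stall run might be requested with module parameters along the
 * lines of
 *
 *	modprobe rcutorture stall_cpu=30 stall_cpu_holdoff=60 stall_cpu_irqsoff=1
 *
 * which, per the code above, waits 60 seconds after the task starts,
 * then spins for 30 seconds with interrupts disabled inside an RCU
 * read-side critical section.
 */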
2517
2518/* Spawn CPU-stall kthread, if stall_cpu specified. */
2519static int __init rcu_torture_stall_init(void)
2520{
2521 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2522 return 0;
2523 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2524}
2525
2526/* State structure for forward-progress self-propagating RCU callback. */
2527struct fwd_cb_state {
2528 struct rcu_head rh;
2529 int stop;
2530};
2531
2532/*
2533 * Forward-progress self-propagating RCU callback function. Because
2534 * callbacks run from softirq, this function is an implicit RCU read-side
2535 * critical section.
2536 */
2537static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2538{
2539 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2540
2541 if (READ_ONCE(fcsp->stop)) {
2542 WRITE_ONCE(fcsp->stop, 2);
2543 return;
2544 }
2545 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2546}
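
/*
 * Illustrative sketch (not part of the test) of the self-propagating
 * callback's start/stop protocol, which rcu_torture_fwd_prog_nr()
 * below follows: post the callback once, later store 1 to ->stop,
 * wait for the callback to acknowledge by storing 2, and only then
 * release the on-stack rcu_head.
 */
static void __maybe_unused selfprop_protocol_sketch(void)
{
	struct fwd_cb_state fcs;

	init_rcu_head_on_stack(&fcs.rh);
	WRITE_ONCE(fcs.stop, 0);
	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);	/* Start self-propagation. */
	/* ... run the test ... */
	WRITE_ONCE(fcs.stop, 1);		/* Ask the callback to stop. */
	cur_ops->sync();			/* Wait for a running callback. */
	cur_ops->cb_barrier();			/* Flush the final posting. */
	WARN_ON(READ_ONCE(fcs.stop) != 2);	/* Callback saw the request. */
	destroy_rcu_head_on_stack(&fcs.rh);
}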
2547
2548/* State for continuous-flood RCU callbacks. */
2549struct rcu_fwd_cb {
2550 struct rcu_head rh;
2551 struct rcu_fwd_cb *rfc_next;
2552 struct rcu_fwd *rfc_rfp;
2553 int rfc_gps;
2554};
2555
2556#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2557#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2558#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
2559#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
2560#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
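
/*
 * Worked example of the bucket arithmetic above: each histogram bucket
 * spans HZ / FWD_CBS_HIST_DIV jiffies (100 milliseconds with the
 * defaults), so N_LAUNDERS_HIST = 2 * (8 * HZ) / (HZ / 10) = 160
 * buckets, covering twice the maximum test duration independently of
 * the HZ setting.
 */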
2561
2562struct rcu_launder_hist {
2563 long n_launders;
2564 unsigned long launder_gp_seq;
2565};
2566
2567struct rcu_fwd {
2568 spinlock_t rcu_fwd_lock;
2569 struct rcu_fwd_cb *rcu_fwd_cb_head;
2570 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2571 long n_launders_cb;
2572 unsigned long rcu_fwd_startat;
2573 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2574 unsigned long rcu_launder_gp_seq_start;
2575 int rcu_fwd_id;
2576};
2577
2578static DEFINE_MUTEX(rcu_fwd_mutex);
2579static struct rcu_fwd *rcu_fwds;
2580static unsigned long rcu_fwd_seq;
2581static atomic_long_t rcu_fwd_max_cbs;
2582static bool rcu_fwd_emergency_stop;
2583
2584static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2585{
2586 unsigned long gps;
2587 unsigned long gps_old;
2588 int i;
2589 int j;
2590
2591 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2592 if (rfp->n_launders_hist[i].n_launders > 0)
2593 break;
2594 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2595 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2596 gps_old = rfp->rcu_launder_gp_seq_start;
2597 for (j = 0; j <= i; j++) {
2598 gps = rfp->n_launders_hist[j].launder_gp_seq;
2599 pr_cont(" %ds/%d: %ld:%ld",
2600 j + 1, FWD_CBS_HIST_DIV,
2601 rfp->n_launders_hist[j].n_launders,
2602 rcutorture_seq_diff(gps, gps_old));
2603 gps_old = gps;
2604 }
2605 pr_cont("\n");
2606}
2607
2608/* Callback function for continuous-flood RCU callbacks. */
2609static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2610{
2611 unsigned long flags;
2612 int i;
2613 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2614 struct rcu_fwd_cb **rfcpp;
2615 struct rcu_fwd *rfp = rfcp->rfc_rfp;
2616
2617 rfcp->rfc_next = NULL;
2618 rfcp->rfc_gps++;
2619 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2620 rfcpp = rfp->rcu_fwd_cb_tail;
2621 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2622 WRITE_ONCE(*rfcpp, rfcp);
2623 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2624 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2625 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2626 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2627 rfp->n_launders_hist[i].n_launders++;
2628 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2629 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2630}
2631
2632// Give the scheduler a chance, even on nohz_full CPUs.
2633static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2634{
2635 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2636 // Real call_rcu() floods hit userspace, so emulate that.
2637 if (need_resched() || (iter & 0xfff))
2638 schedule();
2639 return;
2640 }
2641 // No userspace emulation: CB invocation throttles call_rcu()
2642 cond_resched();
2643}
2644
2645/*
2646 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2647 * test is over or because we hit an OOM event.
2648 */
2649static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2650{
2651 unsigned long flags;
2652 unsigned long freed = 0;
2653 struct rcu_fwd_cb *rfcp;
2654
2655 for (;;) {
2656 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2657 rfcp = rfp->rcu_fwd_cb_head;
2658 if (!rfcp) {
2659 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2660 break;
2661 }
2662 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2663 if (!rfp->rcu_fwd_cb_head)
2664 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2665 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2666 kfree(rfcp);
2667 freed++;
2668 rcu_torture_fwd_prog_cond_resched(freed);
2669 if (tick_nohz_full_enabled()) {
2670 local_irq_save(flags);
2671 rcu_momentary_dyntick_idle();
2672 local_irq_restore(flags);
2673 }
2674 }
2675 return freed;
2676}
2677
2678/* Carry out need_resched()/cond_resched() forward-progress testing. */
2679static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2680 int *tested, int *tested_tries)
2681{
2682 unsigned long cver;
2683 unsigned long dur;
2684 struct fwd_cb_state fcs;
2685 unsigned long gps;
2686 int idx;
2687 int sd;
2688 int sd4;
2689 bool selfpropcb = false;
2690 unsigned long stopat;
2691 static DEFINE_TORTURE_RANDOM(trs);
2692
2693 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2694 if (!cur_ops->sync)
2695 return; // Cannot do need_resched() forward progress testing without ->sync.
2696 if (cur_ops->call && cur_ops->cb_barrier) {
2697 init_rcu_head_on_stack(&fcs.rh);
2698 selfpropcb = true;
2699 }
2700
2701 /* Tight loop containing cond_resched(). */
2702 atomic_inc(&rcu_fwd_cb_nodelay);
2703 cur_ops->sync(); /* Later readers see above write. */
2704 if (selfpropcb) {
2705 WRITE_ONCE(fcs.stop, 0);
2706 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2707 }
2708 cver = READ_ONCE(rcu_torture_current_version);
2709 gps = cur_ops->get_gp_seq();
2710 sd = cur_ops->stall_dur() + 1;
2711 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2712 dur = sd4 + torture_random(&trs) % (sd - sd4);
2713 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2714 stopat = rfp->rcu_fwd_startat + dur;
2715 while (time_before(jiffies, stopat) &&
2716 !shutdown_time_arrived() &&
2717 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2718 idx = cur_ops->readlock();
2719 udelay(10);
2720 cur_ops->readunlock(idx);
2721 if (!fwd_progress_need_resched || need_resched())
2722 cond_resched();
2723 }
2724 (*tested_tries)++;
2725 if (!time_before(jiffies, stopat) &&
2726 !shutdown_time_arrived() &&
2727 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2728 (*tested)++;
2729 cver = READ_ONCE(rcu_torture_current_version) - cver;
2730 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2731 WARN_ON(!cver && gps < 2);
2732 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2733 rfp->rcu_fwd_id, dur, cver, gps);
2734 }
2735 if (selfpropcb) {
2736 WRITE_ONCE(fcs.stop, 1);
2737 cur_ops->sync(); /* Wait for running CB to complete. */
2738 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2739 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2740 }
2741
2742 if (selfpropcb) {
2743 WARN_ON(READ_ONCE(fcs.stop) != 2);
2744 destroy_rcu_head_on_stack(&fcs.rh);
2745 }
2746 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2747 atomic_dec(&rcu_fwd_cb_nodelay);
2748}
2749
2750/* Carry out call_rcu() forward-progress testing. */
2751static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2752{
2753 unsigned long cver;
2754 unsigned long flags;
2755 unsigned long gps;
2756 int i;
2757 long n_launders;
2758 long n_launders_cb_snap;
2759 long n_launders_sa;
2760 long n_max_cbs;
2761 long n_max_gps;
2762 struct rcu_fwd_cb *rfcp;
2763 struct rcu_fwd_cb *rfcpn;
2764 unsigned long stopat;
2765 unsigned long stoppedat;
2766
2767 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2768 if (READ_ONCE(rcu_fwd_emergency_stop))
2769 return; /* Get out of the way quickly, no GP wait! */
2770 if (!cur_ops->call)
2771 return; /* Can't do call_rcu() fwd prog without ->call. */
2772
2773 /* Loop continuously posting RCU callbacks. */
2774 atomic_inc(&rcu_fwd_cb_nodelay);
2775 cur_ops->sync(); /* Later readers see above write. */
2776 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2777 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2778 n_launders = 0;
2779 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2780 n_launders_sa = 0;
2781 n_max_cbs = 0;
2782 n_max_gps = 0;
2783 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2784 rfp->n_launders_hist[i].n_launders = 0;
2785 cver = READ_ONCE(rcu_torture_current_version);
2786 gps = cur_ops->get_gp_seq();
2787 rfp->rcu_launder_gp_seq_start = gps;
2788 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2789 while (time_before(jiffies, stopat) &&
2790 !shutdown_time_arrived() &&
2791 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2792 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2793 rfcpn = NULL;
2794 if (rfcp)
2795 rfcpn = READ_ONCE(rfcp->rfc_next);
2796 if (rfcpn) {
2797 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2798 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2799 break;
2800 rfp->rcu_fwd_cb_head = rfcpn;
2801 n_launders++;
2802 n_launders_sa++;
2803 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2804 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2805 if (WARN_ON_ONCE(!rfcp)) {
2806 schedule_timeout_interruptible(1);
2807 continue;
2808 }
2809 n_max_cbs++;
2810 n_launders_sa = 0;
2811 rfcp->rfc_gps = 0;
2812 rfcp->rfc_rfp = rfp;
2813 } else {
2814 rfcp = NULL;
2815 }
2816 if (rfcp)
2817 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2818 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2819 if (tick_nohz_full_enabled()) {
2820 local_irq_save(flags);
2821 rcu_momentary_dyntick_idle();
2822 local_irq_restore(flags);
2823 }
2824 }
2825 stoppedat = jiffies;
2826 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2827 cver = READ_ONCE(rcu_torture_current_version) - cver;
2828 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2829 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2830 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2831 (void)rcu_torture_fwd_prog_cbfree(rfp);
2832
2833 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2834 !shutdown_time_arrived()) {
2835 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2836 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2837 __func__,
2838 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2839 n_launders + n_max_cbs - n_launders_cb_snap,
2840 n_launders, n_launders_sa,
2841 n_max_gps, n_max_cbs, cver, gps);
2842 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2843 mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2844 rcu_torture_fwd_cb_hist(rfp);
2845 mutex_unlock(&rcu_fwd_mutex);
2846 }
2847 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2848 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2849 atomic_dec(&rcu_fwd_cb_nodelay);
2850}
2851
2852
2853/*
2854 * OOM notifier, but this only prints diagnostic information for the
2855 * current forward-progress test.
2856 */
2857static int rcutorture_oom_notify(struct notifier_block *self,
2858 unsigned long notused, void *nfreed)
2859{
2860 int i;
2861 long ncbs;
2862 struct rcu_fwd *rfp;
2863
2864 mutex_lock(&rcu_fwd_mutex);
2865 rfp = rcu_fwds;
2866 if (!rfp) {
2867 mutex_unlock(&rcu_fwd_mutex);
2868 return NOTIFY_OK;
2869 }
2870 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2871 __func__);
2872 for (i = 0; i < fwd_progress; i++) {
2873 rcu_torture_fwd_cb_hist(&rfp[i]);
2874 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2875 }
2876 WRITE_ONCE(rcu_fwd_emergency_stop, true);
2877 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2878 ncbs = 0;
2879 for (i = 0; i < fwd_progress; i++)
2880 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2881 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2882 cur_ops->cb_barrier();
2883 ncbs = 0;
2884 for (i = 0; i < fwd_progress; i++)
2885 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2886 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2887 cur_ops->cb_barrier();
2888 ncbs = 0;
2889 for (i = 0; i < fwd_progress; i++)
2890 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2891 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2892 smp_mb(); /* Frees before return to avoid redoing OOM. */
2893 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2894 pr_info("%s returning after OOM processing.\n", __func__);
2895 mutex_unlock(&rcu_fwd_mutex);
2896 return NOTIFY_OK;
2897}
2898
2899static struct notifier_block rcutorture_oom_nb = {
2900 .notifier_call = rcutorture_oom_notify
2901};
2902
2903/* Carry out grace-period forward-progress testing. */
2904static int rcu_torture_fwd_prog(void *args)
2905{
2906 bool firsttime = true;
2907 long max_cbs;
2908 int oldnice = task_nice(current);
2909 unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2910 struct rcu_fwd *rfp = args;
2911 int tested = 0;
2912 int tested_tries = 0;
2913
2914 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2915 rcu_bind_current_to_nocb();
2916 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2917 set_user_nice(current, MAX_NICE);
2918 do {
2919 if (!rfp->rcu_fwd_id) {
2920 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2921 WRITE_ONCE(rcu_fwd_emergency_stop, false);
2922 if (!firsttime) {
2923 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2924 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2925 }
2926 firsttime = false;
2927 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2928 } else {
2929 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2930 schedule_timeout_interruptible(HZ / 20);
2931 oldseq = READ_ONCE(rcu_fwd_seq);
2932 }
2933 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2934 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2935 rcu_torture_fwd_prog_cr(rfp);
2936 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2937 (!IS_ENABLED(CONFIG_TINY_RCU) ||
2938 (rcu_inkernel_boot_has_ended() &&
2939 torture_num_online_cpus() > rfp->rcu_fwd_id)))
2940 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2941
2942 /* Avoid slow periods, better to test when busy. */
2943 if (stutter_wait("rcu_torture_fwd_prog"))
2944 sched_set_normal(current, oldnice);
2945 } while (!torture_must_stop());
2946 /* Short runs might not contain a valid forward-progress attempt. */
2947 if (!rfp->rcu_fwd_id) {
2948 WARN_ON(!tested && tested_tries >= 5);
2949 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2950 }
2951 torture_kthread_stopping("rcu_torture_fwd_prog");
2952 return 0;
2953}
2954
2955/* If forward-progress checking is requested and feasible, spawn the thread. */
2956static int __init rcu_torture_fwd_prog_init(void)
2957{
2958 int i;
2959 int ret = 0;
2960 struct rcu_fwd *rfp;
2961
2962 if (!fwd_progress)
2963 return 0; /* Not requested, so don't do it. */
2964 if (fwd_progress >= nr_cpu_ids) {
2965 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
2966 fwd_progress = nr_cpu_ids;
2967 } else if (fwd_progress < 0) {
2968 fwd_progress = nr_cpu_ids;
2969 }
2970 if ((!cur_ops->sync && !cur_ops->call) ||
2971 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2972 cur_ops == &rcu_busted_ops) {
2973 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2974 fwd_progress = 0;
2975 return 0;
2976 }
2977 if (stall_cpu > 0) {
2978 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2979 fwd_progress = 0;
2980 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2981 return -EINVAL; /* In module, can fail back to user. */
2982 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2983 return 0;
2984 }
2985 if (fwd_progress_holdoff <= 0)
2986 fwd_progress_holdoff = 1;
2987 if (fwd_progress_div <= 0)
2988 fwd_progress_div = 4;
2989 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2990 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2991 if (!rfp || !fwd_prog_tasks) {
2992 kfree(rfp);
2993 kfree(fwd_prog_tasks);
2994 fwd_prog_tasks = NULL;
2995 fwd_progress = 0;
2996 return -ENOMEM;
2997 }
2998 for (i = 0; i < fwd_progress; i++) {
2999 spin_lock_init(&rfp[i].rcu_fwd_lock);
3000 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
3001 rfp[i].rcu_fwd_id = i;
3002 }
3003 mutex_lock(&rcu_fwd_mutex);
3004 rcu_fwds = rfp;
3005 mutex_unlock(&rcu_fwd_mutex);
3006 register_oom_notifier(&rcutorture_oom_nb);
3007 for (i = 0; i < fwd_progress; i++) {
3008 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
3009 if (ret) {
3010 fwd_progress = i;
3011 return ret;
3012 }
3013 }
3014 return 0;
3015}
3016
3017static void rcu_torture_fwd_prog_cleanup(void)
3018{
3019 int i;
3020 struct rcu_fwd *rfp;
3021
3022 if (!rcu_fwds || !fwd_prog_tasks)
3023 return;
3024 for (i = 0; i < fwd_progress; i++)
3025 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
3026 unregister_oom_notifier(&rcutorture_oom_nb);
3027 mutex_lock(&rcu_fwd_mutex);
3028 rfp = rcu_fwds;
3029 rcu_fwds = NULL;
3030 mutex_unlock(&rcu_fwd_mutex);
3031 kfree(rfp);
3032 kfree(fwd_prog_tasks);
3033 fwd_prog_tasks = NULL;
3034}
3035
3036/* Callback function for RCU barrier testing. */
3037static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3038{
3039 atomic_inc(&barrier_cbs_invoked);
3040}
3041
3042/* IPI handler to get callback posted on desired CPU, if online. */
3043static void rcu_torture_barrier1cb(void *rcu_void)
3044{
3045 struct rcu_head *rhp = rcu_void;
3046
3047 cur_ops->call(rhp, rcu_torture_barrier_cbf);
3048}
3049
3050/* kthread function to register callbacks used to test RCU barriers. */
3051static int rcu_torture_barrier_cbs(void *arg)
3052{
3053 long myid = (long)arg;
3054 bool lastphase = false;
3055 bool newphase;
3056 struct rcu_head rcu;
3057
3058 init_rcu_head_on_stack(&rcu);
3059 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3060 set_user_nice(current, MAX_NICE);
3061 do {
3062 wait_event(barrier_cbs_wq[myid],
3063 (newphase =
3064 smp_load_acquire(&barrier_phase)) != lastphase ||
3065 torture_must_stop());
3066 lastphase = newphase;
3067 if (torture_must_stop())
3068 break;
3069 /*
3070 * The above smp_load_acquire() ensures barrier_phase load
3071 * is ordered before the following ->call().
3072 */
3073 if (smp_call_function_single(myid, rcu_torture_barrier1cb,
3074 &rcu, 1)) {
3075 // IPI failed, so use direct call from current CPU.
3076 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3077 }
3078 if (atomic_dec_and_test(&barrier_cbs_count))
3079 wake_up(&barrier_wq);
3080 } while (!torture_must_stop());
3081 if (cur_ops->cb_barrier != NULL)
3082 cur_ops->cb_barrier();
3083 destroy_rcu_head_on_stack(&rcu);
3084 torture_kthread_stopping("rcu_torture_barrier_cbs");
3085 return 0;
3086}
3087
3088/* kthread function to drive and coordinate RCU barrier testing. */
3089static int rcu_torture_barrier(void *arg)
3090{
3091 int i;
3092
3093 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
3094 do {
3095 atomic_set(&barrier_cbs_invoked, 0);
3096 atomic_set(&barrier_cbs_count, n_barrier_cbs);
3097 /* Ensure barrier_phase ordered after prior assignments. */
3098 smp_store_release(&barrier_phase, !barrier_phase);
3099 for (i = 0; i < n_barrier_cbs; i++)
3100 wake_up(&barrier_cbs_wq[i]);
3101 wait_event(barrier_wq,
3102 atomic_read(&barrier_cbs_count) == 0 ||
3103 torture_must_stop());
3104 if (torture_must_stop())
3105 break;
3106 n_barrier_attempts++;
3107 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3108 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3109 n_rcu_torture_barrier_error++;
3110 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3111 atomic_read(&barrier_cbs_invoked),
3112 n_barrier_cbs);
3113 WARN_ON(1);
3114 // Wait manually for the remaining callbacks
3115 i = 0;
3116 do {
3117 if (WARN_ON(i++ > HZ))
3118 i = INT_MIN;
3119 schedule_timeout_interruptible(1);
3120 cur_ops->cb_barrier();
3121 } while (atomic_read(&barrier_cbs_invoked) !=
3122 n_barrier_cbs &&
3123 !torture_must_stop());
3124 smp_mb(); // Can't trust ordering if broken.
3125 if (!torture_must_stop())
3126 pr_err("Recovered: barrier_cbs_invoked = %d\n",
3127 atomic_read(&barrier_cbs_invoked));
3128 } else {
3129 n_barrier_successes++;
3130 }
3131 schedule_timeout_interruptible(HZ / 10);
3132 } while (!torture_must_stop());
3133 torture_kthread_stopping("rcu_torture_barrier");
3134 return 0;
3135}
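
/*
 * Illustrative sketch (not part of the test): the phase handshake used
 * by the two kthreads above.  The driver's smp_store_release() of
 * barrier_phase pairs with each CB kthread's smp_load_acquire(), so
 * the counter resets are visible before any callback gets posted.
 * Hypothetical helper compressing both sides into one place.
 */
static void __maybe_unused barrier_phase_sketch(void)
{
	/* Driver side: publish the reset counters, then flip the phase. */
	atomic_set(&barrier_cbs_count, n_barrier_cbs);
	smp_store_release(&barrier_phase, !barrier_phase);

	/* CB-kthread side: observing the flip guarantees seeing the reset. */
	if (smp_load_acquire(&barrier_phase))
		atomic_dec(&barrier_cbs_count);
}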
3136
3137/* Initialize RCU barrier testing. */
3138static int rcu_torture_barrier_init(void)
3139{
3140 int i;
3141 int ret;
3142
3143 if (n_barrier_cbs <= 0)
3144 return 0;
3145 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3146 pr_alert("%s" TORTURE_FLAG
3147 " Call or barrier ops missing for %s,\n",
3148 torture_type, cur_ops->name);
3149 pr_alert("%s" TORTURE_FLAG
3150 " RCU barrier testing omitted from run.\n",
3151 torture_type);
3152 return 0;
3153 }
3154 atomic_set(&barrier_cbs_count, 0);
3155 atomic_set(&barrier_cbs_invoked, 0);
3156 barrier_cbs_tasks =
3157 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
3158 GFP_KERNEL);
3159 barrier_cbs_wq =
3160 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
3161 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
3162 return -ENOMEM;
3163 for (i = 0; i < n_barrier_cbs; i++) {
3164 init_waitqueue_head(&barrier_cbs_wq[i]);
3165 ret = torture_create_kthread(rcu_torture_barrier_cbs,
3166 (void *)(long)i,
3167 barrier_cbs_tasks[i]);
3168 if (ret)
3169 return ret;
3170 }
3171 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3172}
3173
3174/* Clean up after RCU barrier testing. */
3175static void rcu_torture_barrier_cleanup(void)
3176{
3177 int i;
3178
3179 torture_stop_kthread(rcu_torture_barrier, barrier_task);
3180 if (barrier_cbs_tasks != NULL) {
3181 for (i = 0; i < n_barrier_cbs; i++)
3182 torture_stop_kthread(rcu_torture_barrier_cbs,
3183 barrier_cbs_tasks[i]);
3184 kfree(barrier_cbs_tasks);
3185 barrier_cbs_tasks = NULL;
3186 }
3187 if (barrier_cbs_wq != NULL) {
3188 kfree(barrier_cbs_wq);
3189 barrier_cbs_wq = NULL;
3190 }
3191}
3192
3193static bool rcu_torture_can_boost(void)
3194{
3195 static int boost_warn_once;
3196 int prio;
3197
3198 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3199 return false;
3200 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3201 return false;
3202
3203 prio = rcu_get_gp_kthreads_prio();
3204 if (!prio)
3205 return false;
3206
3207 if (prio < 2) {
3208 if (boost_warn_once == 1)
3209 return false;
3210
3211 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3212 boost_warn_once = 1;
3213 return false;
3214 }
3215
3216 return true;
3217}
3218
3219static bool read_exit_child_stop;
3220static bool read_exit_child_stopped;
3221static wait_queue_head_t read_exit_wq;
3222
3223// Child kthread which just does an rcutorture reader and exits.
3224static int rcu_torture_read_exit_child(void *trsp_in)
3225{
3226 struct torture_random_state *trsp = trsp_in;
3227
3228 set_user_nice(current, MAX_NICE);
3229 // Minimize time between reading and exiting.
3230 while (!kthread_should_stop())
3231 schedule_timeout_uninterruptible(HZ / 20);
3232 (void)rcu_torture_one_read(trsp, -1);
3233 return 0;
3234}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
		for (i = 0; i < read_exit_burst; i++) {
			if (READ_ONCE(read_exit_child_stop))
				break;
			stutter_wait("rcu_torture_read_exit");
			// Spawn child.
			tsp = kthread_run(rcu_torture_read_exit_child,
					  &trs, "%s", "rcu_torture_read_exit_child");
			if (IS_ERR(tsp)) {
				TOROUT_ERRSTRING("out of memory");
				errexit = true;
				break;
			}
			cond_resched();
			kthread_stop(tsp);
			n_read_exits++;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
		rcu_barrier(); // Wait for task_struct free, avoid OOM.
		i = 0;
		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
			schedule_timeout_uninterruptible(HZ);
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}

static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}
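
/*
 * Note on the stop handshake above: cleanup publishes read_exit_child_stop
 * and then waits; the smp_load_acquire() pairs with the parent kthread's
 * smp_store_release() of read_exit_child_stopped, so all children are
 * known to have been reaped before torture_stop_kthread() runs, while the
 * two smp_mb() calls order the stop-flag write against the wakeup.
 */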

static void rcutorture_test_nmis(int n)
{
#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	int cpu;
	int dumpcpu;
	int i;

	for (i = 0; i < n; i++) {
		preempt_disable();
		cpu = smp_processor_id();
		dumpcpu = cpu + 1;
		if (dumpcpu >= nr_cpu_ids)
			dumpcpu = 0;
		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
		dump_cpu_task(dumpcpu);
		preempt_enable();
		schedule_timeout_uninterruptible(15 * HZ);
	}
#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}
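
/*
 * For example, booting a kernel with rcutorture built in and
 * rcutorture.test_nmis=3 makes the code above dump three successive CPUs'
 * stacks at 15-second intervals during cleanup; the value 3 is purely
 * illustrative.  Modular rcutorture rejects a non-zero setting.
 */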

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		rcu_gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		rcu_gp_slow_unregister(NULL);
		return;
	}

	rcutorture_test_nmis(test_nmis);

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1; /* Suppress the "+" separator before the first duration. */
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen legitimately, but only via a narrow race:
	 * the first of the pair of duplicate callbacks is queued, someone
	 * else starts a grace period that includes that callback, and the
	 * second of the pair must therefore wait for the next grace period.
	 * If that race does occur, the debug-objects subsystem won't have
	 * splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu_hurry(rhp, rcu_torture_leak_cb);
		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
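
/*
 * With CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, the expected outcome of the test
 * above is a debug-objects splat on the second call_rcu_hurry() for rh2
 * (and for rhp, when the allocation succeeds).  Seeing rcu_torture_err_cb()'s
 * "duplicated callback was invoked" message instead indicates the narrow
 * race described in that callback's comment, not a debug-objects failure.
 */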

/*
 * Used as the sync callback for CPU-hotplug (onoff) testing: invoke a full
 * synchronous grace period once per 4096 calls, exercising the interaction
 * between hotplug operations and grace periods without slowing hotplug to
 * a crawl.
 */
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}
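
/*
 * The helper above advances one cell around the lock cycle: it returns the
 * index of the next lock to wait on, or -1 at the end of a non-deadlocking
 * cycle.  When a deadlock is requested, the final cell wraps back to index
 * zero, closing the cycle that lockdep is expected to flag.
 */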

// Test lockdep on SRCU-based deadlock scenarios.
static void rcu_torture_init_srcu_lockdep(void)
{
	int cyclelen;
	int deadlock;
	bool err = false;
	int i;
	int j;
	int idx;
	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
				 &mut5, &mut6, &mut7, &mut8, &mut9 };
	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
	int testtype;

	if (!test_srcu_lockdep)
		return;

	/*
	 * Decode the DNNL digits.  For example, test_srcu_lockdep=1002
	 * requests a deadlocking (D=1) SRCU-only (NN=00) cycle of length
	 * two (L=2).
	 */
	deadlock = test_srcu_lockdep / 1000;
	testtype = (test_srcu_lockdep / 10) % 100;
	cyclelen = test_srcu_lockdep % 10;
	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
	if (WARN_ONCE(deadlock != !!deadlock,
		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
		      __func__, test_srcu_lockdep, deadlock))
		err = true;
	if (WARN_ONCE(cyclelen <= 0,
		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
		      __func__, test_srcu_lockdep, cyclelen))
		err = true;
	if (err)
		goto err_out;

	if (testtype == 0) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
					      "srcu_read_unlock", i, cyclelen, deadlock);
			idx = srcu_read_lock(srcus[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			srcu_read_unlock(srcus[i], idx);
		}
		return;
	}

	if (testtype == 1) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			mutex_lock(muts[i]);
			mutex_unlock(muts[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
					      "mutex_unlock", i, cyclelen, deadlock);
			mutex_lock(muts[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			mutex_unlock(muts[i]);
		}
		return;
	}

	if (testtype == 2) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			down_read(rwsems[i]);
			up_read(rwsems[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
					      "up_write", i, cyclelen, deadlock);
			down_write(rwsems[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			up_write(rwsems[i]);
		}
		return;
	}

#ifdef CONFIG_TASKS_TRACE_RCU
	if (testtype == 3) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
						     : "synchronize_srcu";
			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";

			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
			if (i == 0)
				rcu_read_lock_trace();
			else
				idx = srcu_read_lock(srcus[i]);
			if (j >= 0) {
				if (i == cyclelen - 1)
					synchronize_rcu_tasks_trace();
				else
					synchronize_srcu(srcus[j]);
			}
			if (i == 0)
				rcu_read_unlock_trace();
			else
				srcu_read_unlock(srcus[i], idx);
		}
		return;
	}
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

err_out:
	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
	pr_info("%s: L: Cycle length.\n", __func__);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
}
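
/*
 * For example, booting with rcutorture.test_srcu_lockdep=1002 should make
 * lockdep complain about the resulting two-cell SRCU cycle, whereas 0002
 * runs the same acquisitions without closing the cycle and must therefore
 * remain splat-free.  These particular values are illustrative only.
 */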

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
		nocbs_nthreads = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	rcu_torture_init_srcu_lockdep();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		// Negative values reserve CPUs: the default of -1 on an
		// 8-CPU system yields 8 - 2 - (-1) = 7 reader kthreads.
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Read-Copy Update module-based torture test facility
4 *
5 * Copyright (C) IBM Corporation, 2005, 2006
6 *
7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8 * Josh Triplett <josh@joshtriplett.org>
9 *
10 * See also: Documentation/RCU/torture.rst
11 */
12
13#define pr_fmt(fmt) fmt
14
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/kthread.h>
20#include <linux/err.h>
21#include <linux/spinlock.h>
22#include <linux/smp.h>
23#include <linux/rcupdate_wait.h>
24#include <linux/rcu_notifier.h>
25#include <linux/interrupt.h>
26#include <linux/sched/signal.h>
27#include <uapi/linux/sched/types.h>
28#include <linux/atomic.h>
29#include <linux/bitops.h>
30#include <linux/completion.h>
31#include <linux/moduleparam.h>
32#include <linux/percpu.h>
33#include <linux/notifier.h>
34#include <linux/reboot.h>
35#include <linux/freezer.h>
36#include <linux/cpu.h>
37#include <linux/delay.h>
38#include <linux/stat.h>
39#include <linux/srcu.h>
40#include <linux/slab.h>
41#include <linux/trace_clock.h>
42#include <asm/byteorder.h>
43#include <linux/torture.h>
44#include <linux/vmalloc.h>
45#include <linux/sched/debug.h>
46#include <linux/sched/sysctl.h>
47#include <linux/oom.h>
48#include <linux/tick.h>
49#include <linux/rcupdate_trace.h>
50#include <linux/nmi.h>
51
52#include "rcu.h"
53
54MODULE_LICENSE("GPL");
55MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
56
57/* Bits for ->extendables field, extendables param, and related definitions. */
58#define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */
59#define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1)
60#define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */
61#define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2)
62#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
63#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
64#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
65#define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
66#define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
67#define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */
68#define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */
69#define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */
70#define RCUTORTURE_MAX_EXTEND \
71 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
72 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
73#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
74 /* Must be power of two minus one. */
75#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
76
77torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
78 "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
79torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
80torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
81torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
82torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
83torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
84torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
85torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
86torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
87torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
88torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
89torture_param(bool, gp_cond_exp_full, false,
90 "Use conditional/async full-stateexpedited GP wait primitives");
91torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
92torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
93torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
94torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
95torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
96torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
97torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
98torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
99torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
100torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
101torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
102torture_param(int, nreaders, -1, "Number of RCU reader threads");
103torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
104torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
105torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
106torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
107torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
108torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
109torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
110torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
111torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
112torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
113torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
114torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
115torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
116torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
117torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
118torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
119torture_param(int, stutter, 5, "Number of seconds to run/halt test");
120torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
121torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
122torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
123torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
124torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
125torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
126torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
127
128static char *torture_type = "rcu";
129module_param(torture_type, charp, 0444);
130MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
131
132static int nrealnocbers;
133static int nrealreaders;
134static struct task_struct *writer_task;
135static struct task_struct **fakewriter_tasks;
136static struct task_struct **reader_tasks;
137static struct task_struct **nocb_tasks;
138static struct task_struct *stats_task;
139static struct task_struct *fqs_task;
140static struct task_struct *boost_tasks[NR_CPUS];
141static struct task_struct *stall_task;
142static struct task_struct **fwd_prog_tasks;
143static struct task_struct **barrier_cbs_tasks;
144static struct task_struct *barrier_task;
145static struct task_struct *read_exit_task;
146
147#define RCU_TORTURE_PIPE_LEN 10
148
149// Mailbox-like structure to check RCU global memory ordering.
150struct rcu_torture_reader_check {
151 unsigned long rtc_myloops;
152 int rtc_chkrdr;
153 unsigned long rtc_chkloops;
154 int rtc_ready;
155 struct rcu_torture_reader_check *rtc_assigner;
156} ____cacheline_internodealigned_in_smp;
157
158// Update-side data structure used to check RCU readers.
159struct rcu_torture {
160 struct rcu_head rtort_rcu;
161 int rtort_pipe_count;
162 struct list_head rtort_free;
163 int rtort_mbtest;
164 struct rcu_torture_reader_check *rtort_chkp;
165};
166
167static LIST_HEAD(rcu_torture_freelist);
168static struct rcu_torture __rcu *rcu_torture_current;
169static unsigned long rcu_torture_current_version;
170static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
171static DEFINE_SPINLOCK(rcu_torture_lock);
172static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
173static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
174static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
175static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
176static atomic_t n_rcu_torture_alloc;
177static atomic_t n_rcu_torture_alloc_fail;
178static atomic_t n_rcu_torture_free;
179static atomic_t n_rcu_torture_mberror;
180static atomic_t n_rcu_torture_mbchk_fail;
181static atomic_t n_rcu_torture_mbchk_tries;
182static atomic_t n_rcu_torture_error;
183static long n_rcu_torture_barrier_error;
184static long n_rcu_torture_boost_ktrerror;
185static long n_rcu_torture_boost_failure;
186static long n_rcu_torture_boosts;
187static atomic_long_t n_rcu_torture_timers;
188static long n_barrier_attempts;
189static long n_barrier_successes; /* did rcu_barrier test succeed? */
190static unsigned long n_read_exits;
191static struct list_head rcu_torture_removed;
192static unsigned long shutdown_jiffies;
193static unsigned long start_gp_seq;
194static atomic_long_t n_nocb_offload;
195static atomic_long_t n_nocb_deoffload;
196
197static int rcu_torture_writer_state;
198#define RTWS_FIXED_DELAY 0
199#define RTWS_DELAY 1
200#define RTWS_REPLACE 2
201#define RTWS_DEF_FREE 3
202#define RTWS_EXP_SYNC 4
203#define RTWS_COND_GET 5
204#define RTWS_COND_GET_FULL 6
205#define RTWS_COND_GET_EXP 7
206#define RTWS_COND_GET_EXP_FULL 8
207#define RTWS_COND_SYNC 9
208#define RTWS_COND_SYNC_FULL 10
209#define RTWS_COND_SYNC_EXP 11
210#define RTWS_COND_SYNC_EXP_FULL 12
211#define RTWS_POLL_GET 13
212#define RTWS_POLL_GET_FULL 14
213#define RTWS_POLL_GET_EXP 15
214#define RTWS_POLL_GET_EXP_FULL 16
215#define RTWS_POLL_WAIT 17
216#define RTWS_POLL_WAIT_FULL 18
217#define RTWS_POLL_WAIT_EXP 19
218#define RTWS_POLL_WAIT_EXP_FULL 20
219#define RTWS_SYNC 21
220#define RTWS_STUTTER 22
221#define RTWS_STOPPING 23
222static const char * const rcu_torture_writer_state_names[] = {
223 "RTWS_FIXED_DELAY",
224 "RTWS_DELAY",
225 "RTWS_REPLACE",
226 "RTWS_DEF_FREE",
227 "RTWS_EXP_SYNC",
228 "RTWS_COND_GET",
229 "RTWS_COND_GET_FULL",
230 "RTWS_COND_GET_EXP",
231 "RTWS_COND_GET_EXP_FULL",
232 "RTWS_COND_SYNC",
233 "RTWS_COND_SYNC_FULL",
234 "RTWS_COND_SYNC_EXP",
235 "RTWS_COND_SYNC_EXP_FULL",
236 "RTWS_POLL_GET",
237 "RTWS_POLL_GET_FULL",
238 "RTWS_POLL_GET_EXP",
239 "RTWS_POLL_GET_EXP_FULL",
240 "RTWS_POLL_WAIT",
241 "RTWS_POLL_WAIT_FULL",
242 "RTWS_POLL_WAIT_EXP",
243 "RTWS_POLL_WAIT_EXP_FULL",
244 "RTWS_SYNC",
245 "RTWS_STUTTER",
246 "RTWS_STOPPING",
247};
248
249/* Record reader segment types and duration for first failing read. */
250struct rt_read_seg {
251 int rt_readstate;
252 unsigned long rt_delay_jiffies;
253 unsigned long rt_delay_ms;
254 unsigned long rt_delay_us;
255 bool rt_preempted;
256};
257static int err_segs_recorded;
258static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
259static int rt_read_nsegs;
260
261static const char *rcu_torture_writer_state_getname(void)
262{
263 unsigned int i = READ_ONCE(rcu_torture_writer_state);
264
265 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
266 return "???";
267 return rcu_torture_writer_state_names[i];
268}
269
270#ifdef CONFIG_RCU_TRACE
271static u64 notrace rcu_trace_clock_local(void)
272{
273 u64 ts = trace_clock_local();
274
275 (void)do_div(ts, NSEC_PER_USEC);
276 return ts;
277}
278#else /* #ifdef CONFIG_RCU_TRACE */
279static u64 notrace rcu_trace_clock_local(void)
280{
281 return 0ULL;
282}
283#endif /* #else #ifdef CONFIG_RCU_TRACE */
284
285/*
286 * Stop aggressive CPU-hog tests a bit before the end of the test in order
287 * to avoid interfering with test shutdown.
288 */
289static bool shutdown_time_arrived(void)
290{
291 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
292}
293
294static unsigned long boost_starttime; /* jiffies of next boost test start. */
295static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
296 /* and boost task create/destroy. */
297static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
298static bool barrier_phase; /* Test phase. */
299static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
300static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
301static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
302
303static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
304
305/*
306 * Allocate an element from the rcu_tortures pool.
307 */
308static struct rcu_torture *
309rcu_torture_alloc(void)
310{
311 struct list_head *p;
312
313 spin_lock_bh(&rcu_torture_lock);
314 if (list_empty(&rcu_torture_freelist)) {
315 atomic_inc(&n_rcu_torture_alloc_fail);
316 spin_unlock_bh(&rcu_torture_lock);
317 return NULL;
318 }
319 atomic_inc(&n_rcu_torture_alloc);
320 p = rcu_torture_freelist.next;
321 list_del_init(p);
322 spin_unlock_bh(&rcu_torture_lock);
323 return container_of(p, struct rcu_torture, rtort_free);
324}
325
326/*
327 * Free an element to the rcu_tortures pool.
328 */
329static void
330rcu_torture_free(struct rcu_torture *p)
331{
332 atomic_inc(&n_rcu_torture_free);
333 spin_lock_bh(&rcu_torture_lock);
334 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
335 spin_unlock_bh(&rcu_torture_lock);
336}
337
338/*
339 * Operations vector for selecting different types of tests.
340 */
341
342struct rcu_torture_ops {
343 int ttype;
344 void (*init)(void);
345 void (*cleanup)(void);
346 int (*readlock)(void);
347 void (*read_delay)(struct torture_random_state *rrsp,
348 struct rt_read_seg *rtrsp);
349 void (*readunlock)(int idx);
350 int (*readlock_held)(void);
351 unsigned long (*get_gp_seq)(void);
352 unsigned long (*gp_diff)(unsigned long new, unsigned long old);
353 void (*deferred_free)(struct rcu_torture *p);
354 void (*sync)(void);
355 void (*exp_sync)(void);
356 unsigned long (*get_gp_state_exp)(void);
357 unsigned long (*start_gp_poll_exp)(void);
358 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
359 bool (*poll_gp_state_exp)(unsigned long oldstate);
360 void (*cond_sync_exp)(unsigned long oldstate);
361 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
362 unsigned long (*get_comp_state)(void);
363 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
364 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
365 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
366 unsigned long (*get_gp_state)(void);
367 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
368 unsigned long (*get_gp_completed)(void);
369 void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
370 unsigned long (*start_gp_poll)(void);
371 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
372 bool (*poll_gp_state)(unsigned long oldstate);
373 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
374 bool (*poll_need_2gp)(bool poll, bool poll_full);
375 void (*cond_sync)(unsigned long oldstate);
376 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
377 call_rcu_func_t call;
378 void (*cb_barrier)(void);
379 void (*fqs)(void);
380 void (*stats)(void);
381 void (*gp_kthread_dbg)(void);
382 bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
383 int (*stall_dur)(void);
384 long cbflood_max;
385 int irq_capable;
386 int can_boost;
387 int extendables;
388 int slow_gps;
389 int no_pi_lock;
390 const char *name;
391};
392
393static struct rcu_torture_ops *cur_ops;
394
395/*
396 * Definitions for rcu torture testing.
397 */
398
399static int torture_readlock_not_held(void)
400{
401 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
402}
403
404static int rcu_torture_read_lock(void)
405{
406 rcu_read_lock();
407 return 0;
408}
409
410static void
411rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
412{
413 unsigned long started;
414 unsigned long completed;
415 const unsigned long shortdelay_us = 200;
416 unsigned long longdelay_ms = 300;
417 unsigned long long ts;
418
419 /* We want a short delay sometimes to make a reader delay the grace
420 * period, and we want a long delay occasionally to trigger
421 * force_quiescent_state. */
422
423 if (!atomic_read(&rcu_fwd_cb_nodelay) &&
424 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
425 started = cur_ops->get_gp_seq();
426 ts = rcu_trace_clock_local();
427 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
428 longdelay_ms = 5; /* Avoid triggering BH limits. */
429 mdelay(longdelay_ms);
430 rtrsp->rt_delay_ms = longdelay_ms;
431 completed = cur_ops->get_gp_seq();
432 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
433 started, completed);
434 }
435 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
436 udelay(shortdelay_us);
437 rtrsp->rt_delay_us = shortdelay_us;
438 }
439 if (!preempt_count() &&
440 !(torture_random(rrsp) % (nrealreaders * 500))) {
441 torture_preempt_schedule(); /* QS only if preemptible. */
442 rtrsp->rt_preempted = true;
443 }
444}
445
446static void rcu_torture_read_unlock(int idx)
447{
448 rcu_read_unlock();
449}
450
451/*
452 * Update callback in the pipe. This should be invoked after a grace period.
453 */
454static bool
455rcu_torture_pipe_update_one(struct rcu_torture *rp)
456{
457 int i;
458 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
459
460 if (rtrcp) {
461 WRITE_ONCE(rp->rtort_chkp, NULL);
462 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
463 }
464 i = READ_ONCE(rp->rtort_pipe_count);
465 if (i > RCU_TORTURE_PIPE_LEN)
466 i = RCU_TORTURE_PIPE_LEN;
467 atomic_inc(&rcu_torture_wcount[i]);
468 WRITE_ONCE(rp->rtort_pipe_count, i + 1);
469 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
470 rp->rtort_mbtest = 0;
471 return true;
472 }
473 return false;
474}
475
476/*
477 * Update all callbacks in the pipe. Suitable for synchronous grace-period
478 * primitives.
479 */
480static void
481rcu_torture_pipe_update(struct rcu_torture *old_rp)
482{
483 struct rcu_torture *rp;
484 struct rcu_torture *rp1;
485
486 if (old_rp)
487 list_add(&old_rp->rtort_free, &rcu_torture_removed);
488 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
489 if (rcu_torture_pipe_update_one(rp)) {
490 list_del(&rp->rtort_free);
491 rcu_torture_free(rp);
492 }
493 }
494}
495
496static void
497rcu_torture_cb(struct rcu_head *p)
498{
499 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
500
501 if (torture_must_stop_irq()) {
502 /* Test is ending, just drop callbacks on the floor. */
503 /* The next initialization will pick up the pieces. */
504 return;
505 }
506 if (rcu_torture_pipe_update_one(rp))
507 rcu_torture_free(rp);
508 else
509 cur_ops->deferred_free(rp);
510}
511
512static unsigned long rcu_no_completed(void)
513{
514 return 0;
515}
516
517static void rcu_torture_deferred_free(struct rcu_torture *p)
518{
519 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
520}
521
522static void rcu_sync_torture_init(void)
523{
524 INIT_LIST_HEAD(&rcu_torture_removed);
525}
526
527static bool rcu_poll_need_2gp(bool poll, bool poll_full)
528{
529 return poll;
530}
531
532static struct rcu_torture_ops rcu_ops = {
533 .ttype = RCU_FLAVOR,
534 .init = rcu_sync_torture_init,
535 .readlock = rcu_torture_read_lock,
536 .read_delay = rcu_read_delay,
537 .readunlock = rcu_torture_read_unlock,
538 .readlock_held = torture_readlock_not_held,
539 .get_gp_seq = rcu_get_gp_seq,
540 .gp_diff = rcu_seq_diff,
541 .deferred_free = rcu_torture_deferred_free,
542 .sync = synchronize_rcu,
543 .exp_sync = synchronize_rcu_expedited,
544 .same_gp_state = same_state_synchronize_rcu,
545 .same_gp_state_full = same_state_synchronize_rcu_full,
546 .get_comp_state = get_completed_synchronize_rcu,
547 .get_comp_state_full = get_completed_synchronize_rcu_full,
548 .get_gp_state = get_state_synchronize_rcu,
549 .get_gp_state_full = get_state_synchronize_rcu_full,
550 .get_gp_completed = get_completed_synchronize_rcu,
551 .get_gp_completed_full = get_completed_synchronize_rcu_full,
552 .start_gp_poll = start_poll_synchronize_rcu,
553 .start_gp_poll_full = start_poll_synchronize_rcu_full,
554 .poll_gp_state = poll_state_synchronize_rcu,
555 .poll_gp_state_full = poll_state_synchronize_rcu_full,
556 .poll_need_2gp = rcu_poll_need_2gp,
557 .cond_sync = cond_synchronize_rcu,
558 .cond_sync_full = cond_synchronize_rcu_full,
559 .get_gp_state_exp = get_state_synchronize_rcu,
560 .start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
561 .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
562 .poll_gp_state_exp = poll_state_synchronize_rcu,
563 .cond_sync_exp = cond_synchronize_rcu_expedited,
564 .call = call_rcu_hurry,
565 .cb_barrier = rcu_barrier,
566 .fqs = rcu_force_quiescent_state,
567 .stats = NULL,
568 .gp_kthread_dbg = show_rcu_gp_kthreads,
569 .check_boost_failed = rcu_check_boost_fail,
570 .stall_dur = rcu_jiffies_till_stall_check,
571 .irq_capable = 1,
572 .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
573 .extendables = RCUTORTURE_MAX_EXTEND,
574 .name = "rcu"
575};
576
577/*
578 * Don't even think about trying any of these in real life!!!
579 * The names includes "busted", and they really means it!
580 * The only purpose of these functions is to provide a buggy RCU
581 * implementation to make sure that rcutorture correctly emits
582 * buggy-RCU error messages.
583 */
584static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
585{
586 /* This is a deliberate bug for testing purposes only! */
587 rcu_torture_cb(&p->rtort_rcu);
588}
589
590static void synchronize_rcu_busted(void)
591{
592 /* This is a deliberate bug for testing purposes only! */
593}
594
595static void
596call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
597{
598 /* This is a deliberate bug for testing purposes only! */
599 func(head);
600}
601
602static struct rcu_torture_ops rcu_busted_ops = {
603 .ttype = INVALID_RCU_FLAVOR,
604 .init = rcu_sync_torture_init,
605 .readlock = rcu_torture_read_lock,
606 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
607 .readunlock = rcu_torture_read_unlock,
608 .readlock_held = torture_readlock_not_held,
609 .get_gp_seq = rcu_no_completed,
610 .deferred_free = rcu_busted_torture_deferred_free,
611 .sync = synchronize_rcu_busted,
612 .exp_sync = synchronize_rcu_busted,
613 .call = call_rcu_busted,
614 .cb_barrier = NULL,
615 .fqs = NULL,
616 .stats = NULL,
617 .irq_capable = 1,
618 .name = "busted"
619};
620
621/*
622 * Definitions for srcu torture testing.
623 */
624
625DEFINE_STATIC_SRCU(srcu_ctl);
626static struct srcu_struct srcu_ctld;
627static struct srcu_struct *srcu_ctlp = &srcu_ctl;
628static struct rcu_torture_ops srcud_ops;
629
630static int srcu_torture_read_lock(void)
631{
632 if (cur_ops == &srcud_ops)
633 return srcu_read_lock_nmisafe(srcu_ctlp);
634 else
635 return srcu_read_lock(srcu_ctlp);
636}
637
638static void
639srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
640{
641 long delay;
642 const long uspertick = 1000000 / HZ;
643 const long longdelay = 10;
644
645 /* We want there to be long-running readers, but not all the time. */
646
647 delay = torture_random(rrsp) %
648 (nrealreaders * 2 * longdelay * uspertick);
649 if (!delay && in_task()) {
650 schedule_timeout_interruptible(longdelay);
651 rtrsp->rt_delay_jiffies = longdelay;
652 } else {
653 rcu_read_delay(rrsp, rtrsp);
654 }
655}
656
657static void srcu_torture_read_unlock(int idx)
658{
659 if (cur_ops == &srcud_ops)
660 srcu_read_unlock_nmisafe(srcu_ctlp, idx);
661 else
662 srcu_read_unlock(srcu_ctlp, idx);
663}
664
665static int torture_srcu_read_lock_held(void)
666{
667 return srcu_read_lock_held(srcu_ctlp);
668}
669
670static unsigned long srcu_torture_completed(void)
671{
672 return srcu_batches_completed(srcu_ctlp);
673}
674
675static void srcu_torture_deferred_free(struct rcu_torture *rp)
676{
677 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
678}
679
680static void srcu_torture_synchronize(void)
681{
682 synchronize_srcu(srcu_ctlp);
683}
684
685static unsigned long srcu_torture_get_gp_state(void)
686{
687 return get_state_synchronize_srcu(srcu_ctlp);
688}
689
690static unsigned long srcu_torture_start_gp_poll(void)
691{
692 return start_poll_synchronize_srcu(srcu_ctlp);
693}
694
695static bool srcu_torture_poll_gp_state(unsigned long oldstate)
696{
697 return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
698}
699
700static void srcu_torture_call(struct rcu_head *head,
701 rcu_callback_t func)
702{
703 call_srcu(srcu_ctlp, head, func);
704}
705
706static void srcu_torture_barrier(void)
707{
708 srcu_barrier(srcu_ctlp);
709}
710
711static void srcu_torture_stats(void)
712{
713 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
714}
715
716static void srcu_torture_synchronize_expedited(void)
717{
718 synchronize_srcu_expedited(srcu_ctlp);
719}
720
721static struct rcu_torture_ops srcu_ops = {
722 .ttype = SRCU_FLAVOR,
723 .init = rcu_sync_torture_init,
724 .readlock = srcu_torture_read_lock,
725 .read_delay = srcu_read_delay,
726 .readunlock = srcu_torture_read_unlock,
727 .readlock_held = torture_srcu_read_lock_held,
728 .get_gp_seq = srcu_torture_completed,
729 .deferred_free = srcu_torture_deferred_free,
730 .sync = srcu_torture_synchronize,
731 .exp_sync = srcu_torture_synchronize_expedited,
732 .get_gp_state = srcu_torture_get_gp_state,
733 .start_gp_poll = srcu_torture_start_gp_poll,
734 .poll_gp_state = srcu_torture_poll_gp_state,
735 .call = srcu_torture_call,
736 .cb_barrier = srcu_torture_barrier,
737 .stats = srcu_torture_stats,
738 .cbflood_max = 50000,
739 .irq_capable = 1,
740 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
741 .name = "srcu"
742};
743
744static void srcu_torture_init(void)
745{
746 rcu_sync_torture_init();
747 WARN_ON(init_srcu_struct(&srcu_ctld));
748 srcu_ctlp = &srcu_ctld;
749}
750
751static void srcu_torture_cleanup(void)
752{
753 cleanup_srcu_struct(&srcu_ctld);
754 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
755}
756
757/* As above, but dynamically allocated. */
758static struct rcu_torture_ops srcud_ops = {
759 .ttype = SRCU_FLAVOR,
760 .init = srcu_torture_init,
761 .cleanup = srcu_torture_cleanup,
762 .readlock = srcu_torture_read_lock,
763 .read_delay = srcu_read_delay,
764 .readunlock = srcu_torture_read_unlock,
765 .readlock_held = torture_srcu_read_lock_held,
766 .get_gp_seq = srcu_torture_completed,
767 .deferred_free = srcu_torture_deferred_free,
768 .sync = srcu_torture_synchronize,
769 .exp_sync = srcu_torture_synchronize_expedited,
770 .get_gp_state = srcu_torture_get_gp_state,
771 .start_gp_poll = srcu_torture_start_gp_poll,
772 .poll_gp_state = srcu_torture_poll_gp_state,
773 .call = srcu_torture_call,
774 .cb_barrier = srcu_torture_barrier,
775 .stats = srcu_torture_stats,
776 .cbflood_max = 50000,
777 .irq_capable = 1,
778 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
779 .name = "srcud"
780};
781
782/* As above, but broken due to inappropriate reader extension. */
783static struct rcu_torture_ops busted_srcud_ops = {
784 .ttype = SRCU_FLAVOR,
785 .init = srcu_torture_init,
786 .cleanup = srcu_torture_cleanup,
787 .readlock = srcu_torture_read_lock,
788 .read_delay = rcu_read_delay,
789 .readunlock = srcu_torture_read_unlock,
790 .readlock_held = torture_srcu_read_lock_held,
791 .get_gp_seq = srcu_torture_completed,
792 .deferred_free = srcu_torture_deferred_free,
793 .sync = srcu_torture_synchronize,
794 .exp_sync = srcu_torture_synchronize_expedited,
795 .call = srcu_torture_call,
796 .cb_barrier = srcu_torture_barrier,
797 .stats = srcu_torture_stats,
798 .irq_capable = 1,
799 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
800 .extendables = RCUTORTURE_MAX_EXTEND,
801 .name = "busted_srcud"
802};
803
804/*
805 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
806 * This implementation does not necessarily work well with CPU hotplug.
807 */
808
809static void synchronize_rcu_trivial(void)
810{
811 int cpu;
812
813 for_each_online_cpu(cpu) {
814 torture_sched_setaffinity(current->pid, cpumask_of(cpu));
815 WARN_ON_ONCE(raw_smp_processor_id() != cpu);
816 }
817}
818
819static int rcu_torture_read_lock_trivial(void)
820{
821 preempt_disable();
822 return 0;
823}
824
825static void rcu_torture_read_unlock_trivial(int idx)
826{
827 preempt_enable();
828}
829
830static struct rcu_torture_ops trivial_ops = {
831 .ttype = RCU_TRIVIAL_FLAVOR,
832 .init = rcu_sync_torture_init,
833 .readlock = rcu_torture_read_lock_trivial,
834 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
835 .readunlock = rcu_torture_read_unlock_trivial,
836 .readlock_held = torture_readlock_not_held,
837 .get_gp_seq = rcu_no_completed,
838 .sync = synchronize_rcu_trivial,
839 .exp_sync = synchronize_rcu_trivial,
840 .fqs = NULL,
841 .stats = NULL,
842 .irq_capable = 1,
843 .name = "trivial"
844};
845
846#ifdef CONFIG_TASKS_RCU
847
848/*
849 * Definitions for RCU-tasks torture testing.
850 */
851
852static int tasks_torture_read_lock(void)
853{
854 return 0;
855}
856
857static void tasks_torture_read_unlock(int idx)
858{
859}
860
861static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
862{
863 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
864}
865
866static void synchronize_rcu_mult_test(void)
867{
868 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
869}
870
871static struct rcu_torture_ops tasks_ops = {
872 .ttype = RCU_TASKS_FLAVOR,
873 .init = rcu_sync_torture_init,
874 .readlock = tasks_torture_read_lock,
875 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
876 .readunlock = tasks_torture_read_unlock,
877 .get_gp_seq = rcu_no_completed,
878 .deferred_free = rcu_tasks_torture_deferred_free,
879 .sync = synchronize_rcu_tasks,
880 .exp_sync = synchronize_rcu_mult_test,
881 .call = call_rcu_tasks,
882 .cb_barrier = rcu_barrier_tasks,
883 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
884 .fqs = NULL,
885 .stats = NULL,
886 .irq_capable = 1,
887 .slow_gps = 1,
888 .name = "tasks"
889};
890
891#define TASKS_OPS &tasks_ops,
892
893#else // #ifdef CONFIG_TASKS_RCU
894
895#define TASKS_OPS
896
897#endif // #else #ifdef CONFIG_TASKS_RCU
898
899
900#ifdef CONFIG_TASKS_RUDE_RCU
901
902/*
903 * Definitions for rude RCU-tasks torture testing.
904 */
905
906static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
907{
908 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
909}
910
911static struct rcu_torture_ops tasks_rude_ops = {
912 .ttype = RCU_TASKS_RUDE_FLAVOR,
913 .init = rcu_sync_torture_init,
914 .readlock = rcu_torture_read_lock_trivial,
915 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
916 .readunlock = rcu_torture_read_unlock_trivial,
917 .get_gp_seq = rcu_no_completed,
918 .deferred_free = rcu_tasks_rude_torture_deferred_free,
919 .sync = synchronize_rcu_tasks_rude,
920 .exp_sync = synchronize_rcu_tasks_rude,
921 .call = call_rcu_tasks_rude,
922 .cb_barrier = rcu_barrier_tasks_rude,
923 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
924 .cbflood_max = 50000,
925 .fqs = NULL,
926 .stats = NULL,
927 .irq_capable = 1,
928 .name = "tasks-rude"
929};
930
931#define TASKS_RUDE_OPS &tasks_rude_ops,
932
933#else // #ifdef CONFIG_TASKS_RUDE_RCU
934
935#define TASKS_RUDE_OPS
936
937#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
938
939
940#ifdef CONFIG_TASKS_TRACE_RCU
941
942/*
943 * Definitions for tracing RCU-tasks torture testing.
944 */
945
946static int tasks_tracing_torture_read_lock(void)
947{
948 rcu_read_lock_trace();
949 return 0;
950}
951
952static void tasks_tracing_torture_read_unlock(int idx)
953{
954 rcu_read_unlock_trace();
955}
956
957static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
958{
959 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
960}
961
962static struct rcu_torture_ops tasks_tracing_ops = {
963 .ttype = RCU_TASKS_TRACING_FLAVOR,
964 .init = rcu_sync_torture_init,
965 .readlock = tasks_tracing_torture_read_lock,
966 .read_delay = srcu_read_delay, /* just reuse srcu's version. */
967 .readunlock = tasks_tracing_torture_read_unlock,
968 .readlock_held = rcu_read_lock_trace_held,
969 .get_gp_seq = rcu_no_completed,
970 .deferred_free = rcu_tasks_tracing_torture_deferred_free,
971 .sync = synchronize_rcu_tasks_trace,
972 .exp_sync = synchronize_rcu_tasks_trace,
973 .call = call_rcu_tasks_trace,
974 .cb_barrier = rcu_barrier_tasks_trace,
975 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
976 .cbflood_max = 50000,
977 .fqs = NULL,
978 .stats = NULL,
979 .irq_capable = 1,
980 .slow_gps = 1,
981 .name = "tasks-tracing"
982};
983
984#define TASKS_TRACING_OPS &tasks_tracing_ops,
985
986#else // #ifdef CONFIG_TASKS_TRACE_RCU
987
988#define TASKS_TRACING_OPS
989
990#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
991
992
993static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
994{
995 if (!cur_ops->gp_diff)
996 return new - old;
997 return cur_ops->gp_diff(new, old);
998}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise, the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full &&
	    cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_gp_completed) {
					cookie = cur_ops->get_gp_completed();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_gp_completed_full) {
					cur_ops->get_gp_completed_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
				cur_ops->cond_sync_exp(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
				cur_ops->cond_sync_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				for (i = 0; i < ARRAY_SIZE(ulo); i++)
					ulo[i] = cur_ops->get_comp_state();
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap)) {
					gp_snap1 = cur_ops->get_gp_state();
					for (i = 0; i < ARRAY_SIZE(ulo); i++)
						if (cur_ops->poll_gp_state(ulo[i]) ||
						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
							ulo[i] = gp_snap1;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
				for (i = 0; i < ARRAY_SIZE(rgo); i++)
					cur_ops->get_comp_state_full(&rgo[i]);
				cur_ops->start_gp_poll_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					cur_ops->get_gp_state_full(&gp_snap1_full);
					for (i = 0; i < ARRAY_SIZE(rgo); i++)
						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
						    cur_ops->same_gp_state_full(&rgo[i],
										&gp_snap1_full)) {
							rgo[i] = gp_snap1_full;
							break;
						}
					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
				gp_snap = cur_ops->start_gp_poll_exp();
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
				while (!cur_ops->poll_gp_state_exp(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				do_rtws_sync(&rand, cur_ops->sync);
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
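		/*
		 * Decoding the bit trick below: when expediting == 0,
		 * (!!expediting - 1) is all ones, so the branch is taken
		 * only when the low byte of the random value is zero,
		 * roughly once per 256 passes.  Once expediting is
		 * nonzero, the mask is zero and the branch is taken on
		 * every pass, stepping the nesting level up to three
		 * deep and then unwinding it back to zero.
		 */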
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					tracing_off();
					show_rcu_gp_kthreads();
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
					rcu_ftrace_dump(DUMP_ALL);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	struct rcu_gp_oldstate gp_snap_full;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		torture_kthread_stopping("rcu_torture_fakewriter");
		return 0;
	}

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_COND_GET_EXP:
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync_exp(gp_snap);
				break;
			case RTWS_COND_GET_FULL:
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync_full(&gp_snap_full);
				break;
			case RTWS_COND_GET_EXP_FULL:
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync_exp_full(&gp_snap_full);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_POLL_GET_FULL:
				cur_ops->start_gp_poll_full(&gp_snap_full);
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_POLL_GET_EXP:
				gp_snap = cur_ops->start_gp_poll_exp();
				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_POLL_GET_EXP_FULL:
				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew1 = -1;
	int idxnew2 = -1;
	int idxold1 = *readstate;
	int idxold2 = idxold1;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold2 < 0);
	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context.  Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp);
	unsigned long randmask2 = randmask1 >> 3;
	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));

	// Can't have nested RCU reader without outer RCU reader.
	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
		if (oldmask & RCUTORTURE_RDR_RCU_1)
			mask &= ~RCUTORTURE_RDR_RCU_2;
		else
			mask |= RCUTORTURE_RDR_RCU_1;
	}

	/*
	 * Can't enable bh w/irq disabled.
	 */
	if (mask & RCUTORTURE_RDR_IRQ)
		mask |= oldmask & bhs;

	/*
	 * Ideally these sequences would be detected in debug builds
	 * (regardless of RT), but until then don't stop testing
	 * them on non-RT.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* Can't modify BH in atomic context */
		if (oldmask & preempts_irq)
			mask &= ~bhs;
		if ((oldmask | mask) & preempts_irq)
			mask |= oldmask & bhs;
	}

	return mask ?: RCUTORTURE_RDR_RCU_1;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = torture_random(trsp);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	bool checkpolling = !(torture_random(trsp) & 0xfff);
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (checkpolling) {
		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
			cookie = cur_ops->get_gp_state();
		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
			cur_ops->get_gp_state_full(&cookie_full);
	}
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  !cur_ops->readlock_held || cur_ops->readlock_held());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (checkpolling) {
		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
			WARN_ONCE(cur_ops->poll_gp_state(cookie),
				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
				  __func__,
				  rcu_torture_writer_state_getname(),
				  rcu_torture_writer_state,
				  cookie, cur_ops->get_gp_state());
		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
				  __func__,
				  rcu_torture_writer_state_getname(),
				  rcu_torture_writer_state,
				  cpumask_pr_args(cpu_online_mask));
	}
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			torture_hrtimeout_us(500, 1000, &rand);
			lastsleep = jiffies + 10;
		}
		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Randomly Toggle CPUs' callback-offload state.  This uses hrtimers to
 * increase race probabilities and fuzzes the interval between toggling.
 */
static int rcu_nocb_toggle(void *arg)
{
	int cpu;
	int maxcpu = -1;
	int oldnice = task_nice(current);
	long r;
	DEFINE_TORTURE_RANDOM(rand);
	ktime_t toggle_delay;
	unsigned long toggle_fuzz;
	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);

	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
	while (!rcu_inkernel_boot_has_ended())
		schedule_timeout_interruptible(HZ / 10);
	for_each_possible_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (toggle_interval > ULONG_MAX)
		toggle_fuzz = ULONG_MAX >> 3;
	else
		toggle_fuzz = toggle_interval >> 3;
	if (toggle_fuzz <= 0)
		toggle_fuzz = NSEC_PER_USEC;
	do {
		r = torture_random(&rand);
		cpu = (r >> 1) % (maxcpu + 1);
		if (r & 0x1) {
			rcu_nocb_cpu_offload(cpu);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpu_deoffload(cpu);
			atomic_long_inc(&n_nocb_deoffload);
		}
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		if (stutter_wait("rcu_nocb_toggle"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_nocb_toggle");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));
	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
	pr_cont("nocb-toggles: %ld:%ld\n",
		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    atomic_read(&n_rcu_torture_mbchk_fail) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_failure || i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror);  // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_failure);  // boost failed (TIMER_SOFTIRQ RT prio?)
		WARN_ON_ONCE(i > 1);  // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0U : wtp->__state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		if (cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

/* Test mem_dump_obj() and friends. */
static void rcu_torture_mem_dump_obj(void)
{
	struct rcu_head *rhp;
	struct kmem_cache *kcp;
	static int z;

	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
	if (WARN_ON_ONCE(!kcp))
		return;
	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
	if (WARN_ON_ONCE(!rhp)) {
		kmem_cache_destroy(kcp);
		return;
	}
	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
	mem_dump_obj(ZERO_SIZE_PTR);
	pr_alert("mem_dump_obj(NULL):");
	mem_dump_obj(NULL);
	pr_alert("mem_dump_obj(%px):", &rhp);
	mem_dump_obj(&rhp);
	pr_alert("mem_dump_obj(%px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(%px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	pr_alert("mem_dump_obj(%px):", &z);
	mem_dump_obj(&z);
	kmem_cache_free(kcp, rhp);
	kmem_cache_destroy(kcp);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (WARN_ON_ONCE(!rhp))
		return;
	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	kfree(rhp);
	rhp = vmalloc(4096);
	if (WARN_ON_ONCE(!rhp))
		return;
	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	vfree(rhp);
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d "
		 "read_exit_delay=%d read_exit_burst=%d "
		 "nocbs_nthreads=%d nocbs_toggle=%d "
		 "test_nmis=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff,
		 read_exit_delay, read_exit_burst,
		 nocbs_nthreads, nocbs_toggle,
		 test_nmis);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	// Testing RCU priority boosting requires rcutorture do
	// some serious abuse.  Counter this by running ksoftirqd
	// at higher priority.
	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
		struct sched_param sp;
		struct task_struct *t;

		t = per_cpu(ksoftirqd, cpu);
		WARN_ON_ONCE(!t);
		sp.sched_priority = 2;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
					      cpu, "rcu_torture_boost_%u");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	mutex_unlock(&boost_mutex);
	return 0;
}

static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
{
	pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
	return NOTIFY_OK;
}

static struct notifier_block rcu_torture_stall_block = {
	.notifier_call = rcu_torture_stall_nf,
};

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	int idx;
	int ret;
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (rcu_cpu_stall_notifiers) {
		ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
		if (ret)
			pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
				__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
	}
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop() && stall_gp_kthread > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
			if (kthread_should_stop())
				break;
			schedule_timeout_uninterruptible(HZ);
		}
	}
	if (!kthread_should_stop() && stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		idx = cur_ops->readlock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else if (!stall_cpu_block)
			preempt_disable();
		pr_alert("%s start on CPU %d.\n",
			 __func__, raw_smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			if (stall_cpu_block) {
#ifdef CONFIG_PREEMPTION
				preempt_schedule();
#else
				schedule_timeout_uninterruptible(HZ);
#endif
			} else if (stall_no_softlockup) {
				touch_softlockup_watchdog();
			}
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else if (!stall_cpu_block)
			preempt_enable();
		cur_ops->readunlock(idx);
	}
	pr_alert("%s end.\n", __func__);
	if (rcu_cpu_stall_notifiers && !ret) {
		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
		if (ret)
			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
2520
2521/* State structure for forward-progress self-propagating RCU callback. */
2522struct fwd_cb_state {
2523 struct rcu_head rh;
2524 int stop;
2525};
2526
2527/*
2528 * Forward-progress self-propagating RCU callback function. Because
2529 * callbacks run from softirq, this function is an implicit RCU read-side
2530 * critical section.
2531 */
2532static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2533{
2534 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2535
2536 if (READ_ONCE(fcsp->stop)) {
2537 WRITE_ONCE(fcsp->stop, 2);
2538 return;
2539 }
2540 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2541}
2542
2543/* State for continuous-flood RCU callbacks. */
2544struct rcu_fwd_cb {
2545 struct rcu_head rh;
2546 struct rcu_fwd_cb *rfc_next;
2547 struct rcu_fwd *rfc_rfp;
2548 int rfc_gps;
2549};
2550
2551#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2552#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2553#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
2554#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
2555#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))

struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
	int rcu_fwd_id;
};

static DEFINE_MUTEX(rcu_fwd_mutex);
static struct rcu_fwd *rcu_fwds;
static unsigned long rcu_fwd_seq;
static atomic_long_t rcu_fwd_max_cbs;
static bool rcu_fwd_emergency_stop;

static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}
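
/*
 * Editor's note on the queueing idiom above: ->rcu_fwd_cb_tail always
 * points to the NULL pointer terminating the list, so enqueueing under
 * ->rcu_fwd_lock is O(1):
 *
 *	rfcp->rfc_next = NULL;
 *	rfcpp = rfp->rcu_fwd_cb_tail;		// Old tail's next pointer.
 *	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;	// Advance the tail.
 *	WRITE_ONCE(*rfcpp, rfcp);		// Link in the new element.
 *
 * The WRITE_ONCE() pairs with the lockless READ_ONCE() traversal of the
 * head in rcu_torture_fwd_prog_cr().
 */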

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
		return;
	}
	// No userspace emulation: CB invocation throttles call_rcu()
	cond_resched();
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
		rfcp = rfp->rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
			break;
		}
		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rfp->rcu_fwd_cb_head)
			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (!cur_ops->sync)
		return; // Cannot do need_resched() forward progress testing without ->sync.
	if (cur_ops->call && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
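	/*
	 * Editor's worked example: with HZ=1000 and a hypothetical
	 * 21-second RCU CPU stall timeout, sd is 21001 jiffies and (with
	 * the default fwd_progress_div of 4) sd4 is 5251, so dur lands in
	 * [5251, 21000] jiffies, that is, between roughly one quarter of
	 * and just under the full stall timeout.
	 */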
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
			 rfp->rcu_fwd_id, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	atomic_dec(&rcu_fwd_cb_nodelay);
}

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		} else {
			rfcp = NULL;
		}
		if (rfcp)
			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
		rcu_torture_fwd_cb_hist(rfp);
		mutex_unlock(&rcu_fwd_mutex);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	atomic_dec(&rcu_fwd_cb_nodelay);
}


/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	int i;
	long ncbs;
	struct rcu_fwd *rfp;

	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	if (!rfp) {
		mutex_unlock(&rcu_fwd_mutex);
		return NOTIFY_OK;
	}
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	for (i = 0; i < fwd_progress; i++) {
		rcu_torture_fwd_cb_hist(&rfp[i]);
		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
	}
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	mutex_unlock(&rcu_fwd_mutex);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	bool firsttime = true;
	long max_cbs;
	int oldnice = task_nice(current);
	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		if (!rfp->rcu_fwd_id) {
			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
			WRITE_ONCE(rcu_fwd_emergency_stop, false);
			if (!firsttime) {
				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
			}
			firsttime = false;
			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
		} else {
			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
				schedule_timeout_interruptible(HZ / 20);
			oldseq = READ_ONCE(rcu_fwd_seq);
		}
		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
			rcu_torture_fwd_prog_cr(rfp);
		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
		     (rcu_inkernel_boot_has_ended() &&
		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);

		/* Avoid slow periods, better to test when busy. */
		if (stutter_wait("rcu_torture_fwd_prog"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	if (!rfp->rcu_fwd_id) {
		WARN_ON(!tested && tested_tries >= 5);
		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	}
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	int i;
	int ret = 0;
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (fwd_progress >= nr_cpu_ids) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
		fwd_progress = nr_cpu_ids;
	} else if (fwd_progress < 0) {
		fwd_progress = nr_cpu_ids;
	}
	if ((!cur_ops->sync && !cur_ops->call) ||
	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		fwd_progress = 0;
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		fwd_progress = 0;
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
	if (!rfp || !fwd_prog_tasks) {
		kfree(rfp);
		kfree(fwd_prog_tasks);
		fwd_prog_tasks = NULL;
		fwd_progress = 0;
		return -ENOMEM;
	}
	for (i = 0; i < fwd_progress; i++) {
		spin_lock_init(&rfp[i].rcu_fwd_lock);
		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
		rfp[i].rcu_fwd_id = i;
	}
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	for (i = 0; i < fwd_progress; i++) {
		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
		if (ret) {
			fwd_progress = i;
			return ret;
		}
	}
	return 0;
}
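
/*
 * Editor's note: a hypothetical invocation exercising this path might be
 * "modprobe rcutorture fwd_progress=4 fwd_progress_holdoff=30", which
 * requests four forward-progress kthreads (capped at the number of CPUs
 * by the code above) with 30 seconds between forward-progress tests.
 */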

static void rcu_torture_fwd_prog_cleanup(void)
{
	int i;
	struct rcu_fwd *rfp;

	if (!rcu_fwds || !fwd_prog_tasks)
		return;
	for (i = 0; i < fwd_progress; i++)
		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
	unregister_oom_notifier(&rcutorture_oom_nb);
	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	kfree(rfp);
	kfree(fwd_prog_tasks);
	fwd_prog_tasks = NULL;
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1)) {
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		}
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
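
/*
 * Editor's summary of the protocol above, one episode per loop pass:
 *
 *  1.	The parent sets barrier_cbs_count to n_barrier_cbs, flips
 *	barrier_phase (smp_store_release()), and wakes all CB kthreads.
 *  2.	Each CB kthread observes the phase change (smp_load_acquire()),
 *	posts one callback on its own CPU (via IPI, falling back to a
 *	direct call), and decrements barrier_cbs_count.
 *  3.	Once the count hits zero, the parent invokes ->cb_barrier(),
 *	after which barrier_cbs_invoked must equal n_barrier_cbs;
 *	anything less means some callback escaped the barrier.
 */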

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}
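
/*
 * Editor's note: a hypothetical configuration satisfying the checks above
 * would boot with "rcutree.kthread_prio=2 rcutorture.test_boost=2" so
 * that the grace-period kthreads run at RT priority >= 2, making
 * priority-boost testing meaningful.
 */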

static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
		for (i = 0; i < read_exit_burst; i++) {
			if (READ_ONCE(read_exit_child_stop))
				break;
			stutter_wait("rcu_torture_read_exit");
			// Spawn child.
			tsp = kthread_run(rcu_torture_read_exit_child,
					  &trs, "%s", "rcu_torture_read_exit_child");
			if (IS_ERR(tsp)) {
				TOROUT_ERRSTRING("out of memory");
				errexit = true;
				break;
			}
			cond_resched();
			kthread_stop(tsp);
			n_read_exits++;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
		rcu_barrier(); // Wait for task_struct free, avoid OOM.
		i = 0;
		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
			schedule_timeout_uninterruptible(HZ);
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}

static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static void rcutorture_test_nmis(int n)
{
#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	int cpu;
	int dumpcpu;
	int i;

	for (i = 0; i < n; i++) {
		preempt_disable();
		cpu = smp_processor_id();
		dumpcpu = cpu + 1;
		if (dumpcpu >= nr_cpu_ids)
			dumpcpu = 0;
		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
		dump_cpu_task(dumpcpu);
		preempt_enable();
		schedule_timeout_uninterruptible(15 * HZ);
	}
#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		rcu_gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		rcu_gp_slow_unregister(NULL);
		return;
	}

	rcutorture_test_nmis(test_nmis);

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period. Unlikely, but can happen. If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu_hurry(rhp, rcu_torture_leak_cb);
		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

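/*
 * Editor's note: rcutorture_sync() below is handed to torture_onoff_init()
 * so that CPU-hotplug torture operations occasionally wait for a grace
 * period; the 0xfff mask limits this to one ->sync() per 4096 calls.
 */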
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}

// Test lockdep on SRCU-based deadlock scenarios.
static void rcu_torture_init_srcu_lockdep(void)
{
	int cyclelen;
	int deadlock;
	bool err = false;
	int i;
	int j;
	int idx;
	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
				 &mut5, &mut6, &mut7, &mut8, &mut9 };
	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
	int testtype;

	if (!test_srcu_lockdep)
		return;

	deadlock = test_srcu_lockdep / 1000;
	testtype = (test_srcu_lockdep / 10) % 100;
	cyclelen = test_srcu_lockdep % 10;
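	/*
	 * Editor's worked example: test_srcu_lockdep=1023 decodes to
	 * deadlock=1, testtype=2, and cyclelen=3, that is, a three-way
	 * SRCU/rwsem deadlock cycle (the "DNNL" format described at
	 * err_out below).
	 */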
	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
	if (WARN_ONCE(deadlock != !!deadlock,
		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
		      __func__, test_srcu_lockdep, deadlock))
		err = true;
	if (WARN_ONCE(cyclelen <= 0,
		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
		      __func__, test_srcu_lockdep, cyclelen))
		err = true;
	if (err)
		goto err_out;

	if (testtype == 0) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
					      "srcu_read_unlock", i, cyclelen, deadlock);
			idx = srcu_read_lock(srcus[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			srcu_read_unlock(srcus[i], idx);
		}
		return;
	}

	if (testtype == 1) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			mutex_lock(muts[i]);
			mutex_unlock(muts[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
					      "mutex_unlock", i, cyclelen, deadlock);
			mutex_lock(muts[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			mutex_unlock(muts[i]);
		}
		return;
	}

	if (testtype == 2) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			down_read(rwsems[i]);
			up_read(rwsems[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
					      "up_write", i, cyclelen, deadlock);
			down_write(rwsems[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			up_write(rwsems[i]);
		}
		return;
	}

#ifdef CONFIG_TASKS_TRACE_RCU
	if (testtype == 3) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
						     : "synchronize_srcu";
			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";

			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
			if (i == 0)
				rcu_read_lock_trace();
			else
				idx = srcu_read_lock(srcus[i]);
			if (j >= 0) {
				if (i == cyclelen - 1)
					synchronize_rcu_tasks_trace();
				else
					synchronize_srcu(srcus[j]);
			}
			if (i == 0)
				rcu_read_unlock_trace();
			else
				srcu_read_unlock(srcus[i], idx);
		}
		return;
	}
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

err_out:
	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
	pr_info("%s: L: Cycle length.\n", __func__);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
		nocbs_nthreads = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	rcu_torture_init_srcu_lockdep();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);