// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA 0
#endif
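// Used by rcu_gp_might_be_stalled(): consider a grace period possibly
// stalled once it is older than 1/8 of the stall timeout, but no sooner
// than two seconds.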
#define RCU_STALL_MIGHT_DIV 8
#define RCU_STALL_MIGHT_MIN (2 * HZ)

int rcu_exp_jiffies_till_stall_check(void)
{
	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
	int exp_stall_delay_delta = 0;
	int till_stall_check;

	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
	if (!cpu_stall_timeout)
		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());

	// Limit check must be consistent with the Kconfig limits for
	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
	// The minimum clamped value is "2UL", because at least one full
	// tick has to be guaranteed.
	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 21UL * HZ);

	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));

#ifdef CONFIG_PROVE_RCU
	/* Add an extra delay of ~25% of till_stall_check. */
	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
#endif

	return till_stall_check + exp_stall_delay_delta;
}
EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled. This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
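 *
 * For example, a caller might choose between the two strategies roughly
 * as follows, where free_it_cb and the rh field are hypothetical:
 *
 *	if (rcu_gp_might_be_stalled())
 *		synchronize_rcu();		// Wait directly, no allocation.
 *	else
 *		call_rcu(&p->rh, free_it_cb);	// Queue memory for deferred freeing.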
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

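/*
 * Re-enable stall warnings only if rcu_sysrq_start() suppressed them:
 * the value 2 distinguishes sysrq-time suppression from the value 1
 * set by the panic notifier below, which must stay in effect.
 */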
void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

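	/*
	 * Panic only once the number of observed stalls reaches the
	 * sysctl_max_rcu_stall_to_panic threshold, so that earlier stalls
	 * still produce ordinary stall-warning output.
	 */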
	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
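	// Request resched attempts at the halfway point of the stall timeout.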
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
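		// Rate-limit further kicks to at most one per second.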
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling. Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
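	// With the rcu_node lock dropped, it is safe to print at length.
	// The get_task_struct() references taken above keep each task_struct
	// around until it has been examined and printed.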
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs. First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps. The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
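	// More than two seconds without recorded activity counts as starvation.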
	return j > 2 * HZ;
}

static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
{
	int cpu;
	struct task_struct *rcuc;
	unsigned long j;

	rcuc = rdp->rcu_cpu_kthread_task;
	if (!rcuc)
		return false;

	cpu = task_cpu(rcuc);
	if (cpu_is_offline(cpu) || idle_cpu(cpu))
		return false;

	j = jiffies - READ_ONCE(rdp->rcuc_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware. Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;
	bool rcuc_starved;
	unsigned long j;
	char buf[32];

	/*
	 * We could be printing a lot while holding a spinlock. Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
	if (rcuc_starved)
		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
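	/*
	 * Legend for the flag characters after the CPU number: "O" means the
	 * CPU is currently offline, "o" that it was offline when the grace
	 * period started (->qsmaskinit), and "N" that it will be offline when
	 * the next grace period starts (->qsmaskinitnext); "." is printed
	 * otherwise. The final character describes the stall-detection
	 * irq_work: "?" if CONFIG_IRQ_WORK=n, a digit giving the number of
	 * grace periods that the irq_work has remained pending, "!" if it
	 * last completed during some earlier grace period, and "." if it
	 * completed during the current grace period.
	 */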
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(cpu) & 0xffff,
	       ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       rcuc_starved ? buf : "",
	       falsepositive ? " (false positive?)" : "");
}

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer. */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	bool didstall = false;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq. These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup. Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall. But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches. This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
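	// Only one CPU can win the cmpxchg below and report a given stall,
	// and writing jn pushes ->jiffies_stall far into the future so that
	// other CPUs stand down until the report completes.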
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;
	}
	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
		WRITE_ONCE(rcu_state.jiffies_stall, jn);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including of callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state. When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure. A return of true says
 * RCU priority boosting is to blame, and false says otherwise. If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1. This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage. On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

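	// The lockless checks above can race with grace-period activity,
	// so repeat them under the leaf rcu_node lock before complaining.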
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture. This is normally invoked
 * due to an OOM event. The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * RCU CPU stall warnings for normal RCU grace periods
4 *
5 * Copyright IBM Corporation, 2019
6 *
7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
8 */
9
10#include <linux/kvm_para.h>
11#include <linux/rcu_notifier.h>
12
13//////////////////////////////////////////////////////////////////////////////
14//
15// Controlling CPU stall warnings, including delay calculation.
16
17/* panic() on RCU Stall sysctl. */
18int sysctl_panic_on_rcu_stall __read_mostly;
19int sysctl_max_rcu_stall_to_panic __read_mostly;
20
21#ifdef CONFIG_PROVE_RCU
22#define RCU_STALL_DELAY_DELTA (5 * HZ)
23#else
24#define RCU_STALL_DELAY_DELTA 0
25#endif
26#define RCU_STALL_MIGHT_DIV 8
27#define RCU_STALL_MIGHT_MIN (2 * HZ)
28
29int rcu_exp_jiffies_till_stall_check(void)
30{
31 int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
32 int exp_stall_delay_delta = 0;
33 int till_stall_check;
34
35 // Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
36 if (!cpu_stall_timeout)
37 cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());
38
39 // Limit check must be consistent with the Kconfig limits for
40 // CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
41 // The minimum clamped value is "2UL", because at least one full
42 // tick has to be guaranteed.
43 till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 300UL * HZ);
44
45 if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
46 WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));
47
48#ifdef CONFIG_PROVE_RCU
49 /* Add extra ~25% out of till_stall_check. */
50 exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
51#endif
52
53 return till_stall_check + exp_stall_delay_delta;
54}
55EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
56
57/* Limit-check stall timeouts specified at boottime and runtime. */
58int rcu_jiffies_till_stall_check(void)
59{
60 int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
61
62 /*
63 * Limit check must be consistent with the Kconfig limits
64 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
65 */
66 if (till_stall_check < 3) {
67 WRITE_ONCE(rcu_cpu_stall_timeout, 3);
68 till_stall_check = 3;
69 } else if (till_stall_check > 300) {
70 WRITE_ONCE(rcu_cpu_stall_timeout, 300);
71 till_stall_check = 300;
72 }
73 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
74}
75EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
76
77/**
78 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
79 *
80 * Returns @true if the current grace period is sufficiently old that
81 * it is reasonable to assume that it might be stalled. This can be
82 * useful when deciding whether to allocate memory to enable RCU-mediated
83 * freeing on the one hand or just invoking synchronize_rcu() on the other.
84 * The latter is preferable when the grace period is stalled.
85 *
86 * Note that sampling of the .gp_start and .gp_seq fields must be done
87 * carefully to avoid false positives at the beginnings and ends of
88 * grace periods.
89 */
90bool rcu_gp_might_be_stalled(void)
91{
92 unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
93 unsigned long j = jiffies;
94
95 if (d < RCU_STALL_MIGHT_MIN)
96 d = RCU_STALL_MIGHT_MIN;
97 smp_mb(); // jiffies before .gp_seq to avoid false positives.
98 if (!rcu_gp_in_progress())
99 return false;
100 // Long delays at this point avoids false positive, but a delay
101 // of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
102 smp_mb(); // .gp_seq before second .gp_start
103 // And ditto here.
104 return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
105}
106
107/* Don't do RCU CPU stall warnings during long sysrq printouts. */
108void rcu_sysrq_start(void)
109{
110 if (!rcu_cpu_stall_suppress)
111 rcu_cpu_stall_suppress = 2;
112}
113
114void rcu_sysrq_end(void)
115{
116 if (rcu_cpu_stall_suppress == 2)
117 rcu_cpu_stall_suppress = 0;
118}
119
120/* Don't print RCU CPU stall warnings during a kernel panic. */
121static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
122{
123 rcu_cpu_stall_suppress = 1;
124 return NOTIFY_DONE;
125}
126
127static struct notifier_block rcu_panic_block = {
128 .notifier_call = rcu_panic,
129};
130
131static int __init check_cpu_stall_init(void)
132{
133 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
134 return 0;
135}
136early_initcall(check_cpu_stall_init);
137
138/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
139static void panic_on_rcu_stall(void)
140{
141 static int cpu_stall;
142
143 if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
144 return;
145
146 if (sysctl_panic_on_rcu_stall)
147 panic("RCU Stall\n");
148}
149
150/**
151 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
152 *
153 * To perform the reset request from the caller, disable stall detection until
154 * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
155 * loaded. It should be safe to do from the fqs loop as enough timer
156 * interrupts and context switches should have passed.
157 *
158 * The caller must disable hard irqs.
159 */
160void rcu_cpu_stall_reset(void)
161{
162 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
163 WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
164}
165
166//////////////////////////////////////////////////////////////////////////////
167//
168// Interaction with RCU grace periods
169
170/* Start of new grace period, so record stall time (and forcing times). */
171static void record_gp_stall_check_time(void)
172{
173 unsigned long j = jiffies;
174 unsigned long j1;
175
176 WRITE_ONCE(rcu_state.gp_start, j);
177 j1 = rcu_jiffies_till_stall_check();
178 smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
179 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
180 WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
181 rcu_state.jiffies_resched = j + j1 / 2;
182 rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
183}
184
185/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
186static void zero_cpu_stall_ticks(struct rcu_data *rdp)
187{
188 rdp->ticks_this_gp = 0;
189 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
190 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
191}
192
193/*
194 * If too much time has passed in the current grace period, and if
195 * so configured, go kick the relevant kthreads.
196 */
197static void rcu_stall_kick_kthreads(void)
198{
199 unsigned long j;
200
201 if (!READ_ONCE(rcu_kick_kthreads))
202 return;
203 j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
204 if (time_after(jiffies, j) && rcu_state.gp_kthread &&
205 (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
206 WARN_ONCE(1, "Kicking %s grace-period kthread\n",
207 rcu_state.name);
208 rcu_ftrace_dump(DUMP_ALL);
209 wake_up_process(rcu_state.gp_kthread);
210 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
211 }
212}
213
214/*
215 * Handler for the irq_work request posted about halfway into the RCU CPU
216 * stall timeout, and used to detect excessive irq disabling. Set state
217 * appropriately, but just complain if there is unexpected state on entry.
218 */
219static void rcu_iw_handler(struct irq_work *iwp)
220{
221 struct rcu_data *rdp;
222 struct rcu_node *rnp;
223
224 rdp = container_of(iwp, struct rcu_data, rcu_iw);
225 rnp = rdp->mynode;
226 raw_spin_lock_rcu_node(rnp);
227 if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
228 rdp->rcu_iw_gp_seq = rnp->gp_seq;
229 rdp->rcu_iw_pending = false;
230 }
231 raw_spin_unlock_rcu_node(rnp);
232}
233
234//////////////////////////////////////////////////////////////////////////////
235//
236// Printing RCU CPU stall warnings
237
238#ifdef CONFIG_PREEMPT_RCU
239
240/*
241 * Dump detailed information for all tasks blocking the current RCU
242 * grace period on the specified rcu_node structure.
243 */
244static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
245{
246 unsigned long flags;
247 struct task_struct *t;
248
249 raw_spin_lock_irqsave_rcu_node(rnp, flags);
250 if (!rcu_preempt_blocked_readers_cgp(rnp)) {
251 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
252 return;
253 }
254 t = list_entry(rnp->gp_tasks->prev,
255 struct task_struct, rcu_node_entry);
256 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
257 /*
258 * We could be printing a lot while holding a spinlock.
259 * Avoid triggering hard lockup.
260 */
261 touch_nmi_watchdog();
262 sched_show_task(t);
263 }
264 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
265}
266
267// Communicate task state back to the RCU CPU stall warning request.
268struct rcu_stall_chk_rdr {
269 int nesting;
270 union rcu_special rs;
271 bool on_blkd_list;
272};
273
274/*
275 * Report out the state of a not-running task that is stalling the
276 * current RCU grace period.
277 */
278static int check_slow_task(struct task_struct *t, void *arg)
279{
280 struct rcu_stall_chk_rdr *rscrp = arg;
281
282 if (task_curr(t))
283 return -EBUSY; // It is running, so decline to inspect it.
284 rscrp->nesting = t->rcu_read_lock_nesting;
285 rscrp->rs = t->rcu_read_unlock_special;
286 rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
287 return 0;
288}
289
290/*
291 * Scan the current list of tasks blocked within RCU read-side critical
292 * sections, printing out the tid of each of the first few of them.
293 */
294static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
295 __releases(rnp->lock)
296{
297 int i = 0;
298 int ndetected = 0;
299 struct rcu_stall_chk_rdr rscr;
300 struct task_struct *t;
301 struct task_struct *ts[8];
302
303 lockdep_assert_irqs_disabled();
304 if (!rcu_preempt_blocked_readers_cgp(rnp)) {
305 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
306 return 0;
307 }
308 pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
309 rnp->level, rnp->grplo, rnp->grphi);
310 t = list_entry(rnp->gp_tasks->prev,
311 struct task_struct, rcu_node_entry);
312 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
313 get_task_struct(t);
314 ts[i++] = t;
315 if (i >= ARRAY_SIZE(ts))
316 break;
317 }
318 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
319 while (i) {
320 t = ts[--i];
321 if (task_call_func(t, check_slow_task, &rscr))
322 pr_cont(" P%d", t->pid);
323 else
324 pr_cont(" P%d/%d:%c%c%c%c",
325 t->pid, rscr.nesting,
326 ".b"[rscr.rs.b.blocked],
327 ".q"[rscr.rs.b.need_qs],
328 ".e"[rscr.rs.b.exp_hint],
329 ".l"[rscr.on_blkd_list]);
330 lockdep_assert_irqs_disabled();
331 put_task_struct(t);
332 ndetected++;
333 }
334 pr_cont("\n");
335 return ndetected;
336}
337
338#else /* #ifdef CONFIG_PREEMPT_RCU */
339
340/*
341 * Because preemptible RCU does not exist, we never have to check for
342 * tasks blocked within RCU read-side critical sections.
343 */
344static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
345{
346}
347
348/*
349 * Because preemptible RCU does not exist, we never have to check for
350 * tasks blocked within RCU read-side critical sections.
351 */
352static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
353 __releases(rnp->lock)
354{
355 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
356 return 0;
357}
358#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
359
360/*
361 * Dump stacks of all tasks running on stalled CPUs. First try using
362 * NMIs, but fall back to manual remote stack tracing on architectures
363 * that don't support NMI-based stack dumps. The NMI-triggered stack
364 * traces are more accurate because they are printed by the target CPU.
365 */
366static void rcu_dump_cpu_stacks(void)
367{
368 int cpu;
369 unsigned long flags;
370 struct rcu_node *rnp;
371
372 rcu_for_each_leaf_node(rnp) {
373 raw_spin_lock_irqsave_rcu_node(rnp, flags);
374 for_each_leaf_node_possible_cpu(rnp, cpu)
375 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
376 if (cpu_is_offline(cpu))
377 pr_err("Offline CPU %d blocking current GP.\n", cpu);
378 else
379 dump_cpu_task(cpu);
380 }
381 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
382 }
383}
384
385static const char * const gp_state_names[] = {
386 [RCU_GP_IDLE] = "RCU_GP_IDLE",
387 [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
388 [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
389 [RCU_GP_ONOFF] = "RCU_GP_ONOFF",
390 [RCU_GP_INIT] = "RCU_GP_INIT",
391 [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
392 [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
393 [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
394 [RCU_GP_CLEANED] = "RCU_GP_CLEANED",
395};
396
397/*
398 * Convert a ->gp_state value to a character string.
399 */
400static const char *gp_state_getname(short gs)
401{
402 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
403 return "???";
404 return gp_state_names[gs];
405}
406
407/* Is the RCU grace-period kthread being starved of CPU time? */
408static bool rcu_is_gp_kthread_starving(unsigned long *jp)
409{
410 unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);
411
412 if (jp)
413 *jp = j;
414 return j > 2 * HZ;
415}
416
417static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
418{
419 int cpu;
420 struct task_struct *rcuc;
421 unsigned long j;
422
423 rcuc = rdp->rcu_cpu_kthread_task;
424 if (!rcuc)
425 return false;
426
427 cpu = task_cpu(rcuc);
428 if (cpu_is_offline(cpu) || idle_cpu(cpu))
429 return false;
430
431 j = jiffies - READ_ONCE(rdp->rcuc_activity);
432
433 if (jp)
434 *jp = j;
435 return j > 2 * HZ;
436}
437
438static void print_cpu_stat_info(int cpu)
439{
440 struct rcu_snap_record rsr, *rsrp;
441 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
442 struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);
443
444 if (!rcu_cpu_stall_cputime)
445 return;
446
447 rsrp = &rdp->snap_record;
448 if (rsrp->gp_seq != rdp->gp_seq)
449 return;
450
451 rsr.cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
452 rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
453 rsr.cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
454
455 pr_err("\t hardirqs softirqs csw/system\n");
456 pr_err("\t number: %8ld %10d %12lld\n",
457 kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
458 kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
459 nr_context_switches_cpu(cpu) - rsrp->nr_csw);
460 pr_err("\tcputime: %8lld %10lld %12lld ==> %d(ms)\n",
461 div_u64(rsr.cputime_irq - rsrp->cputime_irq, NSEC_PER_MSEC),
462 div_u64(rsr.cputime_softirq - rsrp->cputime_softirq, NSEC_PER_MSEC),
463 div_u64(rsr.cputime_system - rsrp->cputime_system, NSEC_PER_MSEC),
464 jiffies_to_msecs(jiffies - rsrp->jiffies));
465}
466
467/*
468 * Print out diagnostic information for the specified stalled CPU.
469 *
470 * If the specified CPU is aware of the current RCU grace period, then
471 * print the number of scheduling clock interrupts the CPU has taken
472 * during the time that it has been aware. Otherwise, print the number
473 * of RCU grace periods that this CPU is ignorant of, for example, "1"
474 * if the CPU was aware of the previous grace period.
475 *
476 * Also print out idle info.
477 */
478static void print_cpu_stall_info(int cpu)
479{
480 unsigned long delta;
481 bool falsepositive;
482 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
483 char *ticks_title;
484 unsigned long ticks_value;
485 bool rcuc_starved;
486 unsigned long j;
487 char buf[32];
488
489 /*
490 * We could be printing a lot while holding a spinlock. Avoid
491 * triggering hard lockup.
492 */
493 touch_nmi_watchdog();
494
495 ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
496 if (ticks_value) {
497 ticks_title = "GPs behind";
498 } else {
499 ticks_title = "ticks this GP";
500 ticks_value = rdp->ticks_this_gp;
501 }
502 delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
503 falsepositive = rcu_is_gp_kthread_starving(NULL) &&
504 rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
505 rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
506 if (rcuc_starved)
507 sprintf(buf, " rcuc=%ld jiffies(starved)", j);
508 pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
509 cpu,
510 "O."[!!cpu_online(cpu)],
511 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
512 "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
513 !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
514 rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
515 "!."[!delta],
516 ticks_value, ticks_title,
517 rcu_dynticks_snap(cpu) & 0xffff,
518 ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
519 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
520 data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
521 rcuc_starved ? buf : "",
522 falsepositive ? " (false positive?)" : "");
523
524 print_cpu_stat_info(cpu);
525}
526
527/* Complain about starvation of grace-period kthread. */
528static void rcu_check_gp_kthread_starvation(void)
529{
530 int cpu;
531 struct task_struct *gpk = rcu_state.gp_kthread;
532 unsigned long j;
533
534 if (rcu_is_gp_kthread_starving(&j)) {
535 cpu = gpk ? task_cpu(gpk) : -1;
536 pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
537 rcu_state.name, j,
538 (long)rcu_seq_current(&rcu_state.gp_seq),
539 data_race(READ_ONCE(rcu_state.gp_flags)),
540 gp_state_getname(rcu_state.gp_state),
541 data_race(READ_ONCE(rcu_state.gp_state)),
542 gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
543 if (gpk) {
544 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
545
546 pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
547 pr_err("RCU grace-period kthread stack dump:\n");
548 sched_show_task(gpk);
549 if (cpu_is_offline(cpu)) {
550 pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
551 } else if (!(data_race(READ_ONCE(rdp->mynode->qsmask)) & rdp->grpmask)) {
552 pr_err("Stack dump where RCU GP kthread last ran:\n");
553 dump_cpu_task(cpu);
554 }
555 wake_up_process(gpk);
556 }
557 }
558}
559
560/* Complain about missing wakeups from expired fqs wait timer */
561static void rcu_check_gp_kthread_expired_fqs_timer(void)
562{
563 struct task_struct *gpk = rcu_state.gp_kthread;
564 short gp_state;
565 unsigned long jiffies_fqs;
566 int cpu;
567
568 /*
569 * Order reads of .gp_state and .jiffies_force_qs.
570 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
571 */
572 gp_state = smp_load_acquire(&rcu_state.gp_state);
573 jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
574
575 if (gp_state == RCU_GP_WAIT_FQS &&
576 time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
577 gpk && !READ_ONCE(gpk->on_rq)) {
578 cpu = task_cpu(gpk);
579 pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
580 rcu_state.name, (jiffies - jiffies_fqs),
581 (long)rcu_seq_current(&rcu_state.gp_seq),
582 data_race(rcu_state.gp_flags),
583 gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
584 data_race(READ_ONCE(gpk->__state)));
585 pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
586 cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
587 }
588}
589
590static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
591{
592 int cpu;
593 unsigned long flags;
594 unsigned long gpa;
595 unsigned long j;
596 int ndetected = 0;
597 struct rcu_node *rnp;
598 long totqlen = 0;
599
600 lockdep_assert_irqs_disabled();
601
602 /* Kick and suppress, if so configured. */
603 rcu_stall_kick_kthreads();
604 if (rcu_stall_is_suppressed())
605 return;
606
607 /*
608 * OK, time to rat on our buddy...
609 * See Documentation/RCU/stallwarn.rst for info on how to debug
610 * RCU CPU stall warnings.
611 */
612 trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
613 pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
614 rcu_for_each_leaf_node(rnp) {
615 raw_spin_lock_irqsave_rcu_node(rnp, flags);
616 if (rnp->qsmask != 0) {
617 for_each_leaf_node_possible_cpu(rnp, cpu)
618 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
619 print_cpu_stall_info(cpu);
620 ndetected++;
621 }
622 }
623 ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
624 lockdep_assert_irqs_disabled();
625 }
626
627 for_each_possible_cpu(cpu)
628 totqlen += rcu_get_n_cbs_cpu(cpu);
629 pr_err("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
630 smp_processor_id(), (long)(jiffies - gps),
631 (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
632 if (ndetected) {
633 rcu_dump_cpu_stacks();
634
635 /* Complain about tasks blocking the grace period. */
636 rcu_for_each_leaf_node(rnp)
637 rcu_print_detail_task_stall_rnp(rnp);
638 } else {
639 if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
640 pr_err("INFO: Stall ended before state dump start\n");
641 } else {
642 j = jiffies;
643 gpa = data_race(READ_ONCE(rcu_state.gp_activity));
644 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
645 rcu_state.name, j - gpa, j, gpa,
646 data_race(READ_ONCE(jiffies_till_next_fqs)),
647 data_race(READ_ONCE(rcu_get_root()->qsmask)));
648 }
649 }
650 /* Rewrite if needed in case of slow consoles. */
651 if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
652 WRITE_ONCE(rcu_state.jiffies_stall,
653 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
654
655 rcu_check_gp_kthread_expired_fqs_timer();
656 rcu_check_gp_kthread_starvation();
657
658 panic_on_rcu_stall();
659
660 rcu_force_quiescent_state(); /* Kick them all. */
661}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_err("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
	       jiffies - gps,
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress, and it could be that we are stuck in kernel space without
	 * context switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	bool self_detected;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();

	/*
	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
	 * loop recompute the stall-warning timeout using a fresh jiffies
	 * value. This is required to obtain a good timeout after coming out
	 * of long breaks in jiffies updates; not doing so can cause false
	 * positives.
	 */
	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
		return;

	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq. These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup. Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall. But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches. This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
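	/*
	 * Update-side sketch for reference (an illustration inferred from
	 * the "opposite order" described above; the actual writes live
	 * elsewhere, at grace-period start, and the exact barrier flavors
	 * there may differ):
	 *
	 *	WRITE_ONCE(rcu_state.gp_start, jiffies);
	 *	smp_mb();	// ->gp_start before ->jiffies_stall...
	 *	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + timeout);
	 *	...		// ...and before the later ->gp_seq update.
	 */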
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2; // Far-future value used to claim ->jiffies_stall.
	self_detected = READ_ONCE(rnp->qsmask) & rdp->grpmask;
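	// Give the stalled CPU RCU_STALL_RAT_DELAY jiffies to report itself
	// before another CPU rats on it.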
	if (rcu_gp_in_progress() &&
	    (self_detected || ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_NORM, (void *)(j - gps));
		if (self_detected) {
			/* We haven't checked in, so go dump stack. */
			print_cpu_stall(gps);
		} else {
			/* They had a few time units to dump stack, so complain. */
			print_other_cpu_stall(gs2, gps);
		}

		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

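		/* If our claim on ->jiffies_stall still stands, rearm the stall timeout. */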
		if (READ_ONCE(rcu_state.jiffies_stall) == jn) {
			jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
			WRITE_ONCE(rcu_state.jiffies_stall, jn);
		}
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including for callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state. When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure. A return of true says
 * RCU priority boosting is to blame, and false says otherwise. If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1. This can happen, for
 * example, when a vCPU is preempted while the last CPU is reporting
 * its quiescent state.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage. On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
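
/*
 * Illustrative sketch only, not built: one way a diagnostic caller might
 * interpret rcu_check_boost_fail().  The function name and messages below
 * are hypothetical.
 */
#if 0
static void example_diagnose_boost(unsigned long gp_state)
{
	int cpu;

	if (rcu_check_boost_fail(gp_state, &cpu))
		pr_info("RCU priority boosting is to blame\n");
	else if (cpu == -1)
		pr_info("No CPU blocking the grace period and nothing to boost\n");
	else
		pr_info("CPU %d has yet to pass through a quiescent state\n", cpu);
}
#endif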

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
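		// In the %c%c%c%c field below, ".x"[!!cond] prints 'x' when
		// cond is nonzero and '.' otherwise.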
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

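	/* Recheck under the leaf rcu_node lock, then again under the root's. */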
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture. This is normally invoked
 * due to an OOM event. The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(u8 key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
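
/*
 * Usage sketch (assuming the rcutree.sysrq_rcu= boot parameter, which is
 * how this module_param is exposed when built into the rcutree namespace):
 * boot with rcutree.sysrq_rcu=1, then "echo y > /proc/sysrq-trigger"
 * invokes sysrq_show_rcu() to dump the RCU tree.
 */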

#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER

//////////////////////////////////////////////////////////////////////////////
//
// RCU CPU stall-warning notifiers

static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);

/**
 * rcu_stall_chain_notifier_register - Add an RCU CPU stall notifier
 * @n: Entry to add.
 *
 * Adds an RCU CPU stall notifier to an atomic notifier chain.
 * The @action passed to a notifier will be %RCU_STALL_NOTIFY_NORM or
 * friends. The @data will be the duration of the stalled grace period,
 * in jiffies, coerced to a void* pointer.
 *
 * Returns zero on success, %-EEXIST on error.
 */
int rcu_stall_chain_notifier_register(struct notifier_block *n)
{
	int rcsn = rcu_cpu_stall_notifiers;

	WARN(1, "Adding %pS() to RCU stall notifier list (%s).\n", n->notifier_call,
	     rcsn ? "possibly suppressing RCU CPU stall warnings" : "failed, so all is well");
	if (rcsn)
		return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);

/**
 * rcu_stall_chain_notifier_unregister - Remove an RCU CPU stall notifier
 * @n: Entry to remove.
 *
 * Removes an RCU CPU stall notifier from an atomic notifier chain.
 *
 * Returns zero on success, %-ENOENT on failure.
 */
int rcu_stall_chain_notifier_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&rcu_cpu_stall_notifier_list, n);
}
EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_unregister);
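
/*
 * Illustrative sketch only, not built: a minimal notifier that logs each
 * stall's duration.  The example_* names are hypothetical.
 */
#if 0
static int example_stall_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	pr_info("RCU stall (action %lu), GP age %lu jiffies\n",
		action, (unsigned long)data);
	return NOTIFY_OK;
}

static struct notifier_block example_stall_nb = {
	.notifier_call = example_stall_notify,
};

/* At init: rcu_stall_chain_notifier_register(&example_stall_nb); */
/* At exit: rcu_stall_chain_notifier_unregister(&example_stall_nb); */
#endif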

/*
 * rcu_stall_notifier_call_chain - Call functions in an RCU CPU stall notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in the RCU CPU stall notifier chain in turn, which
 * is an atomic call chain. See atomic_notifier_call_chain() for more
 * information.
 *
 * This is for use within RCU, hence the omission of the extra asterisk
 * that would otherwise mark this as a kerneldoc-format header comment.
 */
int rcu_stall_notifier_call_chain(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
}

#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER