1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8#ifdef CONFIG_TASKS_RCU_GENERIC
9
10////////////////////////////////////////////////////////////////////////
11//
12// Generic data structures.
13
14struct rcu_tasks;
15typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
16typedef void (*pregp_func_t)(void);
17typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
18typedef void (*postscan_func_t)(struct list_head *hop);
19typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
20typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
21
22/**
 23 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
24 * @cbs_head: Head of callback list.
25 * @cbs_tail: Tail pointer for callback list.
 26 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
27 * @cbs_lock: Lock protecting callback list.
28 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29 * @gp_func: This flavor's grace-period-wait function.
30 * @gp_state: Grace period's most recent state transition (debugging).
31 * @gp_jiffies: Time of last @gp_state transition.
32 * @gp_start: Most recent grace-period start in jiffies.
33 * @n_gps: Number of grace periods completed since boot.
34 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
35 * @n_ipis_fails: Number of IPI-send failures.
36 * @pregp_func: This flavor's pre-grace-period function (optional).
37 * @pertask_func: This flavor's per-task scan function (optional).
38 * @postscan_func: This flavor's post-task scan function (optional).
 39 * @holdouts_func: This flavor's holdout-list scan function (optional).
40 * @postgp_func: This flavor's post-grace-period function (optional).
41 * @call_func: This flavor's call_rcu()-equivalent function.
42 * @name: This flavor's textual name.
43 * @kname: This flavor's kthread name.
44 */
45struct rcu_tasks {
46 struct rcu_head *cbs_head;
47 struct rcu_head **cbs_tail;
48 struct wait_queue_head cbs_wq;
49 raw_spinlock_t cbs_lock;
50 int gp_state;
51 unsigned long gp_jiffies;
52 unsigned long gp_start;
53 unsigned long n_gps;
54 unsigned long n_ipis;
55 unsigned long n_ipis_fails;
56 struct task_struct *kthread_ptr;
57 rcu_tasks_gp_func_t gp_func;
58 pregp_func_t pregp_func;
59 pertask_func_t pertask_func;
60 postscan_func_t postscan_func;
61 holdouts_func_t holdouts_func;
62 postgp_func_t postgp_func;
63 call_rcu_func_t call_func;
64 char *name;
65 char *kname;
66};
67
68#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
69static struct rcu_tasks rt_name = \
70{ \
71 .cbs_tail = &rt_name.cbs_head, \
72 .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
73 .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \
74 .gp_func = gp, \
75 .call_func = call, \
76 .name = n, \
77 .kname = #rt_name, \
78}
79
80/* Track exiting tasks in order to allow them to be waited for. */
81DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
82
83/* Avoid IPIing CPUs early in the grace period. */
84#define RCU_TASK_IPI_DELAY (HZ / 2)
85static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
86module_param(rcu_task_ipi_delay, int, 0644);
87
88/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
89#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
90static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
91module_param(rcu_task_stall_timeout, int, 0644);
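/*
 * Editor's note (a sketch, not authoritative): because this file is included
 * from kernel/rcu/update.c, these module parameters are expected to appear
 * under the "rcupdate." prefix, for example:
 *
 *	rcupdate.rcu_task_stall_timeout=<jiffies>	(kernel boot parameter)
 *	echo 0 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 *
 * The exact names depend on the translation unit this header is built into.
 */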
92
93/* RCU tasks grace-period state for debugging. */
94#define RTGS_INIT 0
95#define RTGS_WAIT_WAIT_CBS 1
96#define RTGS_WAIT_GP 2
97#define RTGS_PRE_WAIT_GP 3
98#define RTGS_SCAN_TASKLIST 4
99#define RTGS_POST_SCAN_TASKLIST 5
100#define RTGS_WAIT_SCAN_HOLDOUTS 6
101#define RTGS_SCAN_HOLDOUTS 7
102#define RTGS_POST_GP 8
103#define RTGS_WAIT_READERS 9
104#define RTGS_INVOKE_CBS 10
105#define RTGS_WAIT_CBS 11
106#ifndef CONFIG_TINY_RCU
107static const char * const rcu_tasks_gp_state_names[] = {
108 "RTGS_INIT",
109 "RTGS_WAIT_WAIT_CBS",
110 "RTGS_WAIT_GP",
111 "RTGS_PRE_WAIT_GP",
112 "RTGS_SCAN_TASKLIST",
113 "RTGS_POST_SCAN_TASKLIST",
114 "RTGS_WAIT_SCAN_HOLDOUTS",
115 "RTGS_SCAN_HOLDOUTS",
116 "RTGS_POST_GP",
117 "RTGS_WAIT_READERS",
118 "RTGS_INVOKE_CBS",
119 "RTGS_WAIT_CBS",
120};
121#endif /* #ifndef CONFIG_TINY_RCU */
122
123////////////////////////////////////////////////////////////////////////
124//
125// Generic code.
126
127/* Record grace-period phase and time. */
128static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
129{
130 rtp->gp_state = newstate;
131 rtp->gp_jiffies = jiffies;
132}
133
134#ifndef CONFIG_TINY_RCU
135/* Return state name. */
136static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
137{
138 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
139 int j = READ_ONCE(i); // Prevent the compiler from reading twice
140
141 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
142 return "???";
143 return rcu_tasks_gp_state_names[j];
144}
145#endif /* #ifndef CONFIG_TINY_RCU */
146
147// Enqueue a callback for the specified flavor of Tasks RCU.
148static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
149 struct rcu_tasks *rtp)
150{
151 unsigned long flags;
152 bool needwake;
153
154 rhp->next = NULL;
155 rhp->func = func;
156 raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
157 needwake = !rtp->cbs_head;
158 WRITE_ONCE(*rtp->cbs_tail, rhp);
159 rtp->cbs_tail = &rhp->next;
160 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
161 /* We can't create the thread unless interrupts are enabled. */
162 if (needwake && READ_ONCE(rtp->kthread_ptr))
163 wake_up(&rtp->cbs_wq);
164}
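/*
 * Editor's sketch of the tail-pointer queue manipulated above (illustrative
 * only): an empty list has ->cbs_tail pointing at ->cbs_head, so the enqueue
 * path needs no empty-list special case:
 *
 *	cbs_head = NULL;  cbs_tail = &cbs_head;		// empty
 *	*cbs_tail = rhp1; cbs_tail = &rhp1->next;	// [rhp1]
 *	*cbs_tail = rhp2; cbs_tail = &rhp2->next;	// [rhp1, rhp2]
 *
 * The kthread below detaches the entire list by resetting both fields under
 * ->cbs_lock.
 */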
165
166// Wait for a grace period for the specified flavor of Tasks RCU.
167static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
168{
169 /* Complain if the scheduler has not started. */
170 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
171 "synchronize_rcu_tasks called too soon");
172
173 /* Wait for the grace period. */
174 wait_rcu_gp(rtp->call_func);
175}
176
177/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
178static int __noreturn rcu_tasks_kthread(void *arg)
179{
180 unsigned long flags;
181 struct rcu_head *list;
182 struct rcu_head *next;
183 struct rcu_tasks *rtp = arg;
184
185 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
186 housekeeping_affine(current, HK_FLAG_RCU);
187 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
188
189 /*
190 * Each pass through the following loop makes one check for
191 * newly arrived callbacks, and, if there are some, waits for
192 * one RCU-tasks grace period and then invokes the callbacks.
193 * This loop is terminated by the system going down. ;-)
194 */
195 for (;;) {
196
197 /* Pick up any new callbacks. */
198 raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
199 smp_mb__after_spinlock(); // Order updates vs. GP.
200 list = rtp->cbs_head;
201 rtp->cbs_head = NULL;
202 rtp->cbs_tail = &rtp->cbs_head;
203 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
204
205 /* If there were none, wait a bit and start over. */
206 if (!list) {
207 wait_event_interruptible(rtp->cbs_wq,
208 READ_ONCE(rtp->cbs_head));
209 if (!rtp->cbs_head) {
210 WARN_ON(signal_pending(current));
211 set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
212 schedule_timeout_idle(HZ/10);
213 }
214 continue;
215 }
216
217 // Wait for one grace period.
218 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
219 rtp->gp_start = jiffies;
220 rtp->gp_func(rtp);
221 rtp->n_gps++;
222
223 /* Invoke the callbacks. */
224 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
225 while (list) {
226 next = list->next;
227 local_bh_disable();
228 list->func(list);
229 local_bh_enable();
230 list = next;
231 cond_resched();
232 }
233 /* Paranoid sleep to keep this from entering a tight loop */
234 schedule_timeout_idle(HZ/10);
235
236 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
237 }
238}
239
240/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
241static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
242{
243 struct task_struct *t;
244
245 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
246 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
247 return;
248 smp_mb(); /* Ensure others see full kthread. */
249}
250
251#ifndef CONFIG_TINY_RCU
252
253/*
254 * Print any non-default Tasks RCU settings.
255 */
256static void __init rcu_tasks_bootup_oddness(void)
257{
258#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
259 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
260 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
261#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
262#ifdef CONFIG_TASKS_RCU
263 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
264#endif /* #ifdef CONFIG_TASKS_RCU */
265#ifdef CONFIG_TASKS_RUDE_RCU
266 pr_info("\tRude variant of Tasks RCU enabled.\n");
267#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
268#ifdef CONFIG_TASKS_TRACE_RCU
269 pr_info("\tTracing variant of Tasks RCU enabled.\n");
270#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
271}
272
273#endif /* #ifndef CONFIG_TINY_RCU */
274
275#ifndef CONFIG_TINY_RCU
276/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
277static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
278{
279 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
280 rtp->kname,
281 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
282 jiffies - data_race(rtp->gp_jiffies),
283 data_race(rtp->n_gps),
284 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
285 ".k"[!!data_race(rtp->kthread_ptr)],
286 ".C"[!!data_race(rtp->cbs_head)],
287 s);
288}
289#endif /* #ifndef CONFIG_TINY_RCU */
290
291static void exit_tasks_rcu_finish_trace(struct task_struct *t);
292
293#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
294
295////////////////////////////////////////////////////////////////////////
296//
297// Shared code between task-list-scanning variants of Tasks RCU.
298
299/* Wait for one RCU-tasks grace period. */
300static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
301{
302 struct task_struct *g, *t;
303 unsigned long lastreport;
304 LIST_HEAD(holdouts);
305 int fract;
306
307 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
308 rtp->pregp_func();
309
310 /*
311 * There were callbacks, so we need to wait for an RCU-tasks
312 * grace period. Start off by scanning the task list for tasks
313 * that are not already voluntarily blocked. Mark these tasks
314 * and make a list of them in holdouts.
315 */
316 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
317 rcu_read_lock();
318 for_each_process_thread(g, t)
319 rtp->pertask_func(t, &holdouts);
320 rcu_read_unlock();
321
322 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
323 rtp->postscan_func(&holdouts);
324
325 /*
326 * Each pass through the following loop scans the list of holdout
327 * tasks, removing any that are no longer holdouts. When the list
328 * is empty, we are done.
329 */
330 lastreport = jiffies;
331
332 /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
333 fract = 10;
334
335 for (;;) {
336 bool firstreport;
337 bool needreport;
338 int rtst;
339
340 if (list_empty(&holdouts))
341 break;
342
343 /* Slowly back off waiting for holdouts */
344 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
345 schedule_timeout_idle(HZ/fract);
346
347 if (fract > 1)
348 fract--;
349
350 rtst = READ_ONCE(rcu_task_stall_timeout);
351 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
352 if (needreport)
353 lastreport = jiffies;
354 firstreport = true;
355 WARN_ON(signal_pending(current));
356 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
357 rtp->holdouts_func(&holdouts, needreport, &firstreport);
358 }
359
360 set_tasks_gp_state(rtp, RTGS_POST_GP);
361 rtp->postgp_func(rtp);
362}
363
364#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
365
366#ifdef CONFIG_TASKS_RCU
367
368////////////////////////////////////////////////////////////////////////
369//
370// Simple variant of RCU whose quiescent states are voluntary context
371// switch, cond_resched_rcu_qs(), user-space execution, and idle.
372// As such, grace periods can take one good long time. There are no
373// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
374// because this implementation is intended to get the system into a safe
375// state for some of the manipulations involved in tracing and the like.
376// Finally, this implementation does not support high call_rcu_tasks()
377// rates from multiple CPUs. If this is required, per-CPU callback lists
378// will be needed.
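/*
 * Editor's sketch of typical asynchronous usage (hypothetical names, not
 * part of the kernel): once a dynamically allocated trampoline has been
 * unhooked, its memory can be freed after all tasks have passed through a
 * quiescent state:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		char insns[64];		// Hypothetical trampoline body.
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		kfree(tp);		// Or an arch-specific release.
 *	}
 *
 *	// After removing the last reference to tp's code:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */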
379
380/* Pre-grace-period preparation. */
381static void rcu_tasks_pregp_step(void)
382{
383 /*
384 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
385 * to complete. Invoking synchronize_rcu() suffices because all
386 * these transitions occur with interrupts disabled. Without this
387 * synchronize_rcu(), a read-side critical section that started
388 * before the grace period might be incorrectly seen as having
389 * started after the grace period.
390 *
391 * This synchronize_rcu() also dispenses with the need for a
392 * memory barrier on the first store to t->rcu_tasks_holdout,
393 * as it forces the store to happen after the beginning of the
394 * grace period.
395 */
396 synchronize_rcu();
397}
398
399/* Per-task initial processing. */
400static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
401{
402 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
403 get_task_struct(t);
404 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
405 WRITE_ONCE(t->rcu_tasks_holdout, true);
406 list_add(&t->rcu_tasks_holdout_list, hop);
407 }
408}
409
410/* Processing between scanning tasklist and draining the holdout list. */
411static void rcu_tasks_postscan(struct list_head *hop)
412{
413 /*
414 * Wait for tasks that are in the process of exiting. This
415 * does only part of the job, ensuring that all tasks that were
416 * previously exiting reach the point where they have disabled
417 * preemption, allowing the later synchronize_rcu() to finish
418 * the job.
419 */
420 synchronize_srcu(&tasks_rcu_exit_srcu);
421}
422
423/* See if tasks are still holding out, complain if so. */
424static void check_holdout_task(struct task_struct *t,
425 bool needreport, bool *firstreport)
426{
427 int cpu;
428
429 if (!READ_ONCE(t->rcu_tasks_holdout) ||
430 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
431 !READ_ONCE(t->on_rq) ||
432 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
433 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
434 WRITE_ONCE(t->rcu_tasks_holdout, false);
435 list_del_init(&t->rcu_tasks_holdout_list);
436 put_task_struct(t);
437 return;
438 }
439 rcu_request_urgent_qs_task(t);
440 if (!needreport)
441 return;
442 if (*firstreport) {
443 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
444 *firstreport = false;
445 }
446 cpu = task_cpu(t);
447 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
448 t, ".I"[is_idle_task(t)],
449 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
450 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
451 t->rcu_tasks_idle_cpu, cpu);
452 sched_show_task(t);
453}
454
455/* Scan the holdout lists for tasks no longer holding out. */
456static void check_all_holdout_tasks(struct list_head *hop,
457 bool needreport, bool *firstreport)
458{
459 struct task_struct *t, *t1;
460
461 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
462 check_holdout_task(t, needreport, firstreport);
463 cond_resched();
464 }
465}
466
467/* Finish off the Tasks-RCU grace period. */
468static void rcu_tasks_postgp(struct rcu_tasks *rtp)
469{
470 /*
 471	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
472 * memory barriers prior to them in the schedule() path, memory
473 * reordering on other CPUs could cause their RCU-tasks read-side
474 * critical sections to extend past the end of the grace period.
475 * However, because these ->nvcsw updates are carried out with
476 * interrupts disabled, we can use synchronize_rcu() to force the
477 * needed ordering on all such CPUs.
478 *
479 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
480 * accesses to be within the grace period, avoiding the need for
481 * memory barriers for ->rcu_tasks_holdout accesses.
482 *
483 * In addition, this synchronize_rcu() waits for exiting tasks
484 * to complete their final preempt_disable() region of execution,
485 * cleaning up after the synchronize_srcu() above.
486 */
487 synchronize_rcu();
488}
489
490void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
491DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
492
493/**
 494 * call_rcu_tasks() - Queue an RCU callback for a task-based grace period
495 * @rhp: structure to be used for queueing the RCU updates.
496 * @func: actual callback function to be invoked after the grace period
497 *
498 * The callback function will be invoked some time after a full grace
499 * period elapses, in other words after all currently executing RCU
500 * read-side critical sections have completed. call_rcu_tasks() assumes
501 * that the read-side critical sections end at a voluntary context
502 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
503 * or transition to usermode execution. As such, there are no read-side
504 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
505 * this primitive is intended to determine that all tasks have passed
 506 * through a safe state, not so much for data-structure synchronization.
507 *
508 * See the description of call_rcu() for more detailed information on
509 * memory ordering guarantees.
510 */
511void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
512{
513 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
514}
515EXPORT_SYMBOL_GPL(call_rcu_tasks);
516
517/**
518 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
519 *
520 * Control will return to the caller some time after a full rcu-tasks
521 * grace period has elapsed, in other words after all currently
 522 * executing rcu-tasks read-side critical sections have completed. These
523 * read-side critical sections are delimited by calls to schedule(),
524 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
525 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
526 *
527 * This is a very specialized primitive, intended only for a few uses in
528 * tracing and other situations requiring manipulation of function
529 * preambles and profiling hooks. The synchronize_rcu_tasks() function
530 * is not (yet) intended for heavy use from multiple CPUs.
531 *
532 * See the description of synchronize_rcu() for more detailed information
533 * on memory ordering guarantees.
534 */
535void synchronize_rcu_tasks(void)
536{
537 synchronize_rcu_tasks_generic(&rcu_tasks);
538}
539EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
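/*
 * Editor's sketch (illustrative only): the synchronous counterpart of the
 * call_rcu_tasks() example above.  Once no new task can begin executing in
 * the old trampoline, wait for a grace period and then free it directly:
 *
 *	my_tramp_unhook(tp);		// Hypothetical: remove last reference.
 *	synchronize_rcu_tasks();	// All tasks now past a quiescent state.
 *	kfree(tp);
 */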
540
541/**
542 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
543 *
544 * Although the current implementation is guaranteed to wait, it is not
545 * obligated to, for example, if there are no pending callbacks.
546 */
547void rcu_barrier_tasks(void)
548{
549 /* There is only one callback queue, so this is easy. ;-) */
550 synchronize_rcu_tasks();
551}
552EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
553
554static int __init rcu_spawn_tasks_kthread(void)
555{
556 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
557 rcu_tasks.pertask_func = rcu_tasks_pertask;
558 rcu_tasks.postscan_func = rcu_tasks_postscan;
559 rcu_tasks.holdouts_func = check_all_holdout_tasks;
560 rcu_tasks.postgp_func = rcu_tasks_postgp;
561 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
562 return 0;
563}
564core_initcall(rcu_spawn_tasks_kthread);
565
566#ifndef CONFIG_TINY_RCU
567static void show_rcu_tasks_classic_gp_kthread(void)
568{
569 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
570}
571#endif /* #ifndef CONFIG_TINY_RCU */
572
573/* Do the srcu_read_lock() for the above synchronize_srcu(). */
574void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
575{
576 preempt_disable();
577 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
578 preempt_enable();
579}
580
581/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
582void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
583{
584 struct task_struct *t = current;
585
586 preempt_disable();
587 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
588 preempt_enable();
589 exit_tasks_rcu_finish_trace(t);
590}
591
592#else /* #ifdef CONFIG_TASKS_RCU */
593static inline void show_rcu_tasks_classic_gp_kthread(void) { }
594void exit_tasks_rcu_start(void) { }
595void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
596#endif /* #else #ifdef CONFIG_TASKS_RCU */
597
598#ifdef CONFIG_TASKS_RUDE_RCU
599
600////////////////////////////////////////////////////////////////////////
601//
602// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
603// passing an empty function to schedule_on_each_cpu(). This approach
604// provides an asynchronous call_rcu_tasks_rude() API and batching
605// of concurrent calls to the synchronous synchronize_rcu_rude() API.
606// This sends IPIs far and wide and induces otherwise unnecessary context
607// switches on all online CPUs, whether idle or not.
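/*
 * Editor's sketch (illustrative only): an updater that needs to wait until
 * every online CPU has passed through at least one context switch might use
 * the synchronous API, with the asynchronous call_rcu_tasks_rude() available
 * for the same purpose:
 *
 *	my_probe_unhook(p);		// Hypothetical: remove the last reference.
 *	synchronize_rcu_tasks_rude();	// Every online CPU has context-switched.
 *	kfree(p);
 */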
608
609// Empty function to allow workqueues to force a context switch.
610static void rcu_tasks_be_rude(struct work_struct *work)
611{
612}
613
614// Wait for one rude RCU-tasks grace period.
615static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
616{
617 rtp->n_ipis += cpumask_weight(cpu_online_mask);
618 schedule_on_each_cpu(rcu_tasks_be_rude);
619}
620
621void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
622DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
623 "RCU Tasks Rude");
624
625/**
 626 * call_rcu_tasks_rude() - Queue an RCU callback for a rude task-based grace period
627 * @rhp: structure to be used for queueing the RCU updates.
628 * @func: actual callback function to be invoked after the grace period
629 *
630 * The callback function will be invoked some time after a full grace
631 * period elapses, in other words after all currently executing RCU
632 * read-side critical sections have completed. call_rcu_tasks_rude()
633 * assumes that the read-side critical sections end at context switch,
634 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
635 * there are no read-side primitives analogous to rcu_read_lock() and
636 * rcu_read_unlock() because this primitive is intended to determine
637 * that all tasks have passed through a safe state, not so much for
 638 * data-structure synchronization.
639 *
640 * See the description of call_rcu() for more detailed information on
641 * memory ordering guarantees.
642 */
643void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
644{
645 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
646}
647EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
648
649/**
650 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
651 *
652 * Control will return to the caller some time after a rude rcu-tasks
653 * grace period has elapsed, in other words after all currently
 654 * executing rcu-tasks read-side critical sections have completed. These
655 * read-side critical sections are delimited by calls to schedule(),
656 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
657 * anyway) cond_resched().
658 *
659 * This is a very specialized primitive, intended only for a few uses in
660 * tracing and other situations requiring manipulation of function preambles
661 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
662 * (yet) intended for heavy use from multiple CPUs.
663 *
664 * See the description of synchronize_rcu() for more detailed information
665 * on memory ordering guarantees.
666 */
667void synchronize_rcu_tasks_rude(void)
668{
669 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
670}
671EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
672
673/**
674 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
675 *
676 * Although the current implementation is guaranteed to wait, it is not
677 * obligated to, for example, if there are no pending callbacks.
678 */
679void rcu_barrier_tasks_rude(void)
680{
681 /* There is only one callback queue, so this is easy. ;-) */
682 synchronize_rcu_tasks_rude();
683}
684EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
685
686static int __init rcu_spawn_tasks_rude_kthread(void)
687{
688 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
689 return 0;
690}
691core_initcall(rcu_spawn_tasks_rude_kthread);
692
693#ifndef CONFIG_TINY_RCU
694static void show_rcu_tasks_rude_gp_kthread(void)
695{
696 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
697}
698#endif /* #ifndef CONFIG_TINY_RCU */
699
700#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
701static void show_rcu_tasks_rude_gp_kthread(void) {}
702#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
703
704////////////////////////////////////////////////////////////////////////
705//
706// Tracing variant of Tasks RCU. This variant is designed to be used
707// to protect tracing hooks, including those of BPF. This variant
708// therefore:
709//
710// 1. Has explicit read-side markers to allow finite grace periods
711// in the face of in-kernel loops for PREEMPT=n builds.
712//
713// 2. Protects code in the idle loop, exception entry/exit, and
714// CPU-hotplug code paths, similar to the capabilities of SRCU.
715//
 716// 3. Avoids expensive read-side instructions, having overhead similar
717// to that of Preemptible RCU.
718//
719// There are of course downsides. The grace-period code can send IPIs to
720// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
721// It is necessary to scan the full tasklist, much as for Tasks RCU. There
722// is a single callback queue guarded by a single lock, again, much as for
723// Tasks RCU. If needed, these downsides can be at least partially remedied.
724//
725// Perhaps most important, this variant of RCU does not affect the vanilla
726// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
727// readers can operate from idle, offline, and exception entry/exit in no
728// way allows rcu_preempt and rcu_sched readers to also do so.
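/*
 * Editor's sketch of the intended reader/updater pairing (hypothetical
 * names, not part of the kernel): readers mark their critical sections
 * explicitly, and updaters defer reclamation with call_rcu_tasks_trace()
 * or wait with synchronize_rcu_tasks_trace():
 *
 *	// Reader, for example a tracing or BPF dispatch path:
 *	rcu_read_lock_trace();
 *	prog = READ_ONCE(hook->prog);	// Hypothetical hook structure.
 *	if (prog)
 *		run_prog(prog);		// Hypothetical invocation helper.
 *	rcu_read_unlock_trace();
 *
 *	// Updater replacing or removing the program:
 *	old = xchg(&hook->prog, NULL);
 *	if (old)
 *		call_rcu_tasks_trace(&old->rh, prog_free_cb);
 */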
729
730// The lockdep state must be outside of #ifdef to be useful.
731#ifdef CONFIG_DEBUG_LOCK_ALLOC
732static struct lock_class_key rcu_lock_trace_key;
733struct lockdep_map rcu_trace_lock_map =
734 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
735EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
736#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
737
738#ifdef CONFIG_TASKS_TRACE_RCU
739
740static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
741static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
742
743// Record outstanding IPIs to each CPU. No point in sending two...
744static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
745
746// The number of detections of task quiescent state relying on
747// heavyweight readers executing explicit memory barriers.
748unsigned long n_heavy_reader_attempts;
749unsigned long n_heavy_reader_updates;
750unsigned long n_heavy_reader_ofl_updates;
751
752void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
753DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
754 "RCU Tasks Trace");
755
756/*
757 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
758 * while the scheduler locks are held.
759 */
760static void rcu_read_unlock_iw(struct irq_work *iwp)
761{
762 wake_up(&trc_wait);
763}
764static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
765
766/* If we are the last reader, wake up the grace-period kthread. */
767void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
768{
769 int nq = t->trc_reader_special.b.need_qs;
770
771 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
772 t->trc_reader_special.b.need_mb)
773 smp_mb(); // Pairs with update-side barriers.
774 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
775 if (nq)
776 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
777 WRITE_ONCE(t->trc_reader_nesting, nesting);
778 if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
779 irq_work_queue(&rcu_tasks_trace_iw);
780}
781EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
782
783/* Add a task to the holdout list, if it is not already on the list. */
784static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
785{
786 if (list_empty(&t->trc_holdout_list)) {
787 get_task_struct(t);
788 list_add(&t->trc_holdout_list, bhp);
789 }
790}
791
792/* Remove a task from the holdout list, if it is in fact present. */
793static void trc_del_holdout(struct task_struct *t)
794{
795 if (!list_empty(&t->trc_holdout_list)) {
796 list_del_init(&t->trc_holdout_list);
797 put_task_struct(t);
798 }
799}
800
801/* IPI handler to check task state. */
802static void trc_read_check_handler(void *t_in)
803{
804 struct task_struct *t = current;
805 struct task_struct *texp = t_in;
806
807 // If the task is no longer running on this CPU, leave.
808 if (unlikely(texp != t)) {
809 if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
810 wake_up(&trc_wait);
811 goto reset_ipi; // Already on holdout list, so will check later.
812 }
813
814 // If the task is not in a read-side critical section, and
815 // if this is the last reader, awaken the grace-period kthread.
816 if (likely(!t->trc_reader_nesting)) {
817 if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
818 wake_up(&trc_wait);
819 // Mark as checked after decrement to avoid false
820 // positives on the above WARN_ON_ONCE().
821 WRITE_ONCE(t->trc_reader_checked, true);
822 goto reset_ipi;
823 }
824 WRITE_ONCE(t->trc_reader_checked, true);
825
826 // Get here if the task is in a read-side critical section. Set
827 // its state so that it will awaken the grace-period kthread upon
828 // exit from that critical section.
829 WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
830 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
831
832reset_ipi:
833 // Allow future IPIs to be sent on CPU and for task.
834 // Also order this IPI handler against any later manipulations of
835 // the intended task.
836 smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
837 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
838}
839
840/* Callback function for scheduler to check locked-down task. */
841static bool trc_inspect_reader(struct task_struct *t, void *arg)
842{
843 int cpu = task_cpu(t);
844 bool in_qs = false;
845 bool ofl = cpu_is_offline(cpu);
846
847 if (task_curr(t)) {
848 WARN_ON_ONCE(ofl && !is_idle_task(t));
849
850 // If no chance of heavyweight readers, do it the hard way.
851 if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
852 return false;
853
854 // If heavyweight readers are enabled on the remote task,
 855		// we can inspect its state even though it is currently running.
856 // However, we cannot safely change its state.
857 n_heavy_reader_attempts++;
858 if (!ofl && // Check for "running" idle tasks on offline CPUs.
859 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
860 return false; // No quiescent state, do it the hard way.
861 n_heavy_reader_updates++;
862 if (ofl)
863 n_heavy_reader_ofl_updates++;
864 in_qs = true;
865 } else {
866 in_qs = likely(!t->trc_reader_nesting);
867 }
868
869 // Mark as checked. Because this is called from the grace-period
870 // kthread, also remove the task from the holdout list.
871 t->trc_reader_checked = true;
872 trc_del_holdout(t);
873
874 if (in_qs)
875 return true; // Already in quiescent state, done!!!
876
877 // The task is in a read-side critical section, so set up its
878 // state so that it will awaken the grace-period kthread upon exit
879 // from that critical section.
880 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
881 WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
882 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
883 return true;
884}
885
886/* Attempt to extract the state for the specified task. */
887static void trc_wait_for_one_reader(struct task_struct *t,
888 struct list_head *bhp)
889{
890 int cpu;
891
892 // If a previous IPI is still in flight, let it complete.
893 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
894 return;
895
896 // The current task had better be in a quiescent state.
897 if (t == current) {
898 t->trc_reader_checked = true;
899 trc_del_holdout(t);
900 WARN_ON_ONCE(t->trc_reader_nesting);
901 return;
902 }
903
904 // Attempt to nail down the task for inspection.
905 get_task_struct(t);
906 if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
907 put_task_struct(t);
908 return;
909 }
910 put_task_struct(t);
911
 912	// If the task is currently running, send an IPI; either way, add it to the list.
913 trc_add_holdout(t, bhp);
914 if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
915 // The task is currently running, so try IPIing it.
916 cpu = task_cpu(t);
917
918 // If there is already an IPI outstanding, let it happen.
919 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
920 return;
921
922 atomic_inc(&trc_n_readers_need_end);
923 per_cpu(trc_ipi_to_cpu, cpu) = true;
924 t->trc_ipi_to_cpu = cpu;
925 rcu_tasks_trace.n_ipis++;
926 if (smp_call_function_single(cpu,
927 trc_read_check_handler, t, 0)) {
928 // Just in case there is some other reason for
929 // failure than the target CPU being offline.
930 rcu_tasks_trace.n_ipis_fails++;
931 per_cpu(trc_ipi_to_cpu, cpu) = false;
 932			t->trc_ipi_to_cpu = -1;
933 if (atomic_dec_and_test(&trc_n_readers_need_end)) {
934 WARN_ON_ONCE(1);
935 wake_up(&trc_wait);
936 }
937 }
938 }
939}
940
941/* Initialize for a new RCU-tasks-trace grace period. */
942static void rcu_tasks_trace_pregp_step(void)
943{
944 int cpu;
945
946 // Allow for fast-acting IPIs.
947 atomic_set(&trc_n_readers_need_end, 1);
948
949 // There shouldn't be any old IPIs, but...
950 for_each_possible_cpu(cpu)
951 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
952
953 // Disable CPU hotplug across the tasklist scan.
954 // This also waits for all readers in CPU-hotplug code paths.
955 cpus_read_lock();
956}
957
958/* Do first-round processing for the specified task. */
959static void rcu_tasks_trace_pertask(struct task_struct *t,
960 struct list_head *hop)
961{
962 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
963 WRITE_ONCE(t->trc_reader_checked, false);
964 t->trc_ipi_to_cpu = -1;
965 trc_wait_for_one_reader(t, hop);
966}
967
968/*
969 * Do intermediate processing between task and holdout scans and
970 * pick up the idle tasks.
971 */
972static void rcu_tasks_trace_postscan(struct list_head *hop)
973{
974 int cpu;
975
976 for_each_possible_cpu(cpu)
977 rcu_tasks_trace_pertask(idle_task(cpu), hop);
978
979 // Re-enable CPU hotplug now that the tasklist scan has completed.
980 cpus_read_unlock();
981
982 // Wait for late-stage exiting tasks to finish exiting.
983 // These might have passed the call to exit_tasks_rcu_finish().
984 synchronize_rcu();
985 // Any tasks that exit after this point will set ->trc_reader_checked.
986}
987
988/* Show the state of a task stalling the current RCU tasks trace GP. */
989static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
990{
991 int cpu;
992
993 if (*firstreport) {
994 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
995 *firstreport = false;
996 }
997 // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
998 cpu = task_cpu(t);
999 pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1000 t->pid,
1001 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
1002 ".i"[is_idle_task(t)],
1003 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
1004 t->trc_reader_nesting,
1005 " N"[!!t->trc_reader_special.b.need_qs],
1006 cpu);
1007 sched_show_task(t);
1008}
1009
1010/* List stalled IPIs for RCU tasks trace. */
1011static void show_stalled_ipi_trace(void)
1012{
1013 int cpu;
1014
1015 for_each_possible_cpu(cpu)
1016 if (per_cpu(trc_ipi_to_cpu, cpu))
1017 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1018}
1019
1020/* Do one scan of the holdout list. */
1021static void check_all_holdout_tasks_trace(struct list_head *hop,
1022 bool needreport, bool *firstreport)
1023{
1024 struct task_struct *g, *t;
1025
1026 // Disable CPU hotplug across the holdout list scan.
1027 cpus_read_lock();
1028
1029 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1030 // If safe and needed, try to check the current task.
1031 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1032 !READ_ONCE(t->trc_reader_checked))
1033 trc_wait_for_one_reader(t, hop);
1034
1035 // If check succeeded, remove this task from the list.
1036 if (READ_ONCE(t->trc_reader_checked))
1037 trc_del_holdout(t);
1038 else if (needreport)
1039 show_stalled_task_trace(t, firstreport);
1040 }
1041
1042 // Re-enable CPU hotplug now that the holdout list scan has completed.
1043 cpus_read_unlock();
1044
1045 if (needreport) {
 1046		if (*firstreport)
1047 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1048 show_stalled_ipi_trace();
1049 }
1050}
1051
1052/* Wait for grace period to complete and provide ordering. */
1053static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1054{
1055 bool firstreport;
1056 struct task_struct *g, *t;
1057 LIST_HEAD(holdouts);
1058 long ret;
1059
1060 // Remove the safety count.
1061 smp_mb__before_atomic(); // Order vs. earlier atomics
1062 atomic_dec(&trc_n_readers_need_end);
1063 smp_mb__after_atomic(); // Order vs. later atomics
1064
1065 // Wait for readers.
1066 set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1067 for (;;) {
1068 ret = wait_event_idle_exclusive_timeout(
1069 trc_wait,
1070 atomic_read(&trc_n_readers_need_end) == 0,
1071 READ_ONCE(rcu_task_stall_timeout));
1072 if (ret)
1073 break; // Count reached zero.
1074 // Stall warning time, so make a list of the offenders.
1075 for_each_process_thread(g, t)
1076 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1077 trc_add_holdout(t, &holdouts);
1078 firstreport = true;
1079 list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
1080 if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
1081 show_stalled_task_trace(t, &firstreport);
1082 trc_del_holdout(t);
1083 }
1084 if (firstreport)
 1085			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
1086 show_stalled_ipi_trace();
1087 pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1088 }
1089 smp_mb(); // Caller's code must be ordered after wakeup.
1090 // Pairs with pretty much every ordering primitive.
1091}
1092
1093/* Report any needed quiescent state for this exiting task. */
1094static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1095{
1096 WRITE_ONCE(t->trc_reader_checked, true);
1097 WARN_ON_ONCE(t->trc_reader_nesting);
1098 WRITE_ONCE(t->trc_reader_nesting, 0);
1099 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1100 rcu_read_unlock_trace_special(t, 0);
1101}
1102
1103/**
 1104 * call_rcu_tasks_trace() - Queue an RCU callback for a trace task-based grace period
1105 * @rhp: structure to be used for queueing the RCU updates.
1106 * @func: actual callback function to be invoked after the grace period
1107 *
1108 * The callback function will be invoked some time after a full grace
1109 * period elapses, in other words after all currently executing RCU
 1110 * read-side critical sections have completed. Unlike the other Tasks
 1111 * RCU flavors, RCU Tasks Trace has explicit read-side primitives: the
 1112 * read-side critical sections waited on by call_rcu_tasks_trace() are
 1113 * delimited by calls to rcu_read_lock_trace() and
 1114 * rcu_read_unlock_trace(). This flavor is therefore intended to protect
 1115 * tracing hooks, including those of BPF, and its readers may run from
 1116 * the idle loop, exception entry/exit, and CPU-hotplug code paths.
1117 *
1118 * See the description of call_rcu() for more detailed information on
1119 * memory ordering guarantees.
1120 */
1121void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1122{
1123 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1124}
1125EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1126
1127/**
1128 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1129 *
1130 * Control will return to the caller some time after a trace rcu-tasks
1131 * grace period has elapsed, in other words after all currently executing
 1132 * rcu-tasks read-side critical sections have completed. These read-side
1133 * critical sections are delimited by calls to rcu_read_lock_trace()
1134 * and rcu_read_unlock_trace().
1135 *
1136 * This is a very specialized primitive, intended only for a few uses in
1137 * tracing and other situations requiring manipulation of function preambles
1138 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1139 * (yet) intended for heavy use from multiple CPUs.
1140 *
1141 * See the description of synchronize_rcu() for more detailed information
1142 * on memory ordering guarantees.
1143 */
1144void synchronize_rcu_tasks_trace(void)
1145{
1146 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1147 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1148}
1149EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1150
1151/**
1152 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1153 *
1154 * Although the current implementation is guaranteed to wait, it is not
1155 * obligated to, for example, if there are no pending callbacks.
1156 */
1157void rcu_barrier_tasks_trace(void)
1158{
1159 /* There is only one callback queue, so this is easy. ;-) */
1160 synchronize_rcu_tasks_trace();
1161}
1162EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1163
1164static int __init rcu_spawn_tasks_trace_kthread(void)
1165{
1166 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1167 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1168 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1169 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1170 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1171 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1172 return 0;
1173}
1174core_initcall(rcu_spawn_tasks_trace_kthread);
1175
1176#ifndef CONFIG_TINY_RCU
1177static void show_rcu_tasks_trace_gp_kthread(void)
1178{
1179 char buf[64];
1180
1181 sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1182 data_race(n_heavy_reader_ofl_updates),
1183 data_race(n_heavy_reader_updates),
1184 data_race(n_heavy_reader_attempts));
1185 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1186}
1187#endif /* #ifndef CONFIG_TINY_RCU */
1188
1189#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1190static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1191static inline void show_rcu_tasks_trace_gp_kthread(void) {}
1192#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1193
1194#ifndef CONFIG_TINY_RCU
1195void show_rcu_tasks_gp_kthreads(void)
1196{
1197 show_rcu_tasks_classic_gp_kthread();
1198 show_rcu_tasks_rude_gp_kthread();
1199 show_rcu_tasks_trace_gp_kthread();
1200}
1201#endif /* #ifndef CONFIG_TINY_RCU */
1202
1203#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1204static inline void rcu_tasks_bootup_oddness(void) {}
1205void show_rcu_tasks_gp_kthreads(void) {}
1206#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8#ifdef CONFIG_TASKS_RCU_GENERIC
9
10////////////////////////////////////////////////////////////////////////
11//
12// Generic data structures.
13
14struct rcu_tasks;
15typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
16typedef void (*pregp_func_t)(void);
17typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
18typedef void (*postscan_func_t)(struct list_head *hop);
19typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
20typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
21
22/**
23 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
24 * @cbs_head: Head of callback list.
25 * @cbs_tail: Tail pointer for callback list.
26 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
27 * @cbs_lock: Lock protecting callback list.
28 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29 * @gp_func: This flavor's grace-period-wait function.
30 * @gp_state: Grace period's most recent state transition (debugging).
31 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
32 * @init_fract: Initial backoff sleep interval.
33 * @gp_jiffies: Time of last @gp_state transition.
34 * @gp_start: Most recent grace-period start in jiffies.
35 * @n_gps: Number of grace periods completed since boot.
36 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
37 * @n_ipis_fails: Number of IPI-send failures.
38 * @pregp_func: This flavor's pre-grace-period function (optional).
39 * @pertask_func: This flavor's per-task scan function (optional).
40 * @postscan_func: This flavor's post-task scan function (optional).
41 * @holdouts_func: This flavor's holdout-list scan function (optional).
42 * @postgp_func: This flavor's post-grace-period function (optional).
43 * @call_func: This flavor's call_rcu()-equivalent function.
44 * @name: This flavor's textual name.
45 * @kname: This flavor's kthread name.
46 */
47struct rcu_tasks {
48 struct rcu_head *cbs_head;
49 struct rcu_head **cbs_tail;
50 struct wait_queue_head cbs_wq;
51 raw_spinlock_t cbs_lock;
52 int gp_state;
53 int gp_sleep;
54 int init_fract;
55 unsigned long gp_jiffies;
56 unsigned long gp_start;
57 unsigned long n_gps;
58 unsigned long n_ipis;
59 unsigned long n_ipis_fails;
60 struct task_struct *kthread_ptr;
61 rcu_tasks_gp_func_t gp_func;
62 pregp_func_t pregp_func;
63 pertask_func_t pertask_func;
64 postscan_func_t postscan_func;
65 holdouts_func_t holdouts_func;
66 postgp_func_t postgp_func;
67 call_rcu_func_t call_func;
68 char *name;
69 char *kname;
70};
71
72#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
73static struct rcu_tasks rt_name = \
74{ \
75 .cbs_tail = &rt_name.cbs_head, \
76 .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
77 .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \
78 .gp_func = gp, \
79 .call_func = call, \
80 .name = n, \
81 .kname = #rt_name, \
82}
83
84/* Track exiting tasks in order to allow them to be waited for. */
85DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
86
87/* Avoid IPIing CPUs early in the grace period. */
88#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
89static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
90module_param(rcu_task_ipi_delay, int, 0644);
91
92/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
93#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
94static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
95module_param(rcu_task_stall_timeout, int, 0644);
96
97/* RCU tasks grace-period state for debugging. */
98#define RTGS_INIT 0
99#define RTGS_WAIT_WAIT_CBS 1
100#define RTGS_WAIT_GP 2
101#define RTGS_PRE_WAIT_GP 3
102#define RTGS_SCAN_TASKLIST 4
103#define RTGS_POST_SCAN_TASKLIST 5
104#define RTGS_WAIT_SCAN_HOLDOUTS 6
105#define RTGS_SCAN_HOLDOUTS 7
106#define RTGS_POST_GP 8
107#define RTGS_WAIT_READERS 9
108#define RTGS_INVOKE_CBS 10
109#define RTGS_WAIT_CBS 11
110#ifndef CONFIG_TINY_RCU
111static const char * const rcu_tasks_gp_state_names[] = {
112 "RTGS_INIT",
113 "RTGS_WAIT_WAIT_CBS",
114 "RTGS_WAIT_GP",
115 "RTGS_PRE_WAIT_GP",
116 "RTGS_SCAN_TASKLIST",
117 "RTGS_POST_SCAN_TASKLIST",
118 "RTGS_WAIT_SCAN_HOLDOUTS",
119 "RTGS_SCAN_HOLDOUTS",
120 "RTGS_POST_GP",
121 "RTGS_WAIT_READERS",
122 "RTGS_INVOKE_CBS",
123 "RTGS_WAIT_CBS",
124};
125#endif /* #ifndef CONFIG_TINY_RCU */
126
127////////////////////////////////////////////////////////////////////////
128//
129// Generic code.
130
131/* Record grace-period phase and time. */
132static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
133{
134 rtp->gp_state = newstate;
135 rtp->gp_jiffies = jiffies;
136}
137
138#ifndef CONFIG_TINY_RCU
139/* Return state name. */
140static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
141{
142 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
143 int j = READ_ONCE(i); // Prevent the compiler from reading twice
144
145 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
146 return "???";
147 return rcu_tasks_gp_state_names[j];
148}
149#endif /* #ifndef CONFIG_TINY_RCU */
150
151// Enqueue a callback for the specified flavor of Tasks RCU.
152static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
153 struct rcu_tasks *rtp)
154{
155 unsigned long flags;
156 bool needwake;
157
158 rhp->next = NULL;
159 rhp->func = func;
160 raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
161 needwake = !rtp->cbs_head;
162 WRITE_ONCE(*rtp->cbs_tail, rhp);
163 rtp->cbs_tail = &rhp->next;
164 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
165 /* We can't create the thread unless interrupts are enabled. */
166 if (needwake && READ_ONCE(rtp->kthread_ptr))
167 wake_up(&rtp->cbs_wq);
168}
169
170// Wait for a grace period for the specified flavor of Tasks RCU.
171static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
172{
173 /* Complain if the scheduler has not started. */
174 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
175 "synchronize_rcu_tasks called too soon");
176
177 /* Wait for the grace period. */
178 wait_rcu_gp(rtp->call_func);
179}
180
181/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
182static int __noreturn rcu_tasks_kthread(void *arg)
183{
184 unsigned long flags;
185 struct rcu_head *list;
186 struct rcu_head *next;
187 struct rcu_tasks *rtp = arg;
188
189 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
190 housekeeping_affine(current, HK_FLAG_RCU);
191 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
192
193 /*
194 * Each pass through the following loop makes one check for
195 * newly arrived callbacks, and, if there are some, waits for
196 * one RCU-tasks grace period and then invokes the callbacks.
197 * This loop is terminated by the system going down. ;-)
198 */
199 for (;;) {
200
201 /* Pick up any new callbacks. */
202 raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
203 smp_mb__after_spinlock(); // Order updates vs. GP.
204 list = rtp->cbs_head;
205 rtp->cbs_head = NULL;
206 rtp->cbs_tail = &rtp->cbs_head;
207 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
208
209 /* If there were none, wait a bit and start over. */
210 if (!list) {
211 wait_event_interruptible(rtp->cbs_wq,
212 READ_ONCE(rtp->cbs_head));
213 if (!rtp->cbs_head) {
214 WARN_ON(signal_pending(current));
215 set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
216 schedule_timeout_idle(HZ/10);
217 }
218 continue;
219 }
220
221 // Wait for one grace period.
222 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
223 rtp->gp_start = jiffies;
224 rtp->gp_func(rtp);
225 rtp->n_gps++;
226
227 /* Invoke the callbacks. */
228 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
229 while (list) {
230 next = list->next;
231 local_bh_disable();
232 list->func(list);
233 local_bh_enable();
234 list = next;
235 cond_resched();
236 }
237 /* Paranoid sleep to keep this from entering a tight loop */
238 schedule_timeout_idle(rtp->gp_sleep);
239
240 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
241 }
242}
243
244/* Spawn RCU-tasks grace-period kthread. */
245static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
246{
247 struct task_struct *t;
248
249 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
250 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
251 return;
252 smp_mb(); /* Ensure others see full kthread. */
253}
254
255#ifndef CONFIG_TINY_RCU
256
257/*
258 * Print any non-default Tasks RCU settings.
259 */
260static void __init rcu_tasks_bootup_oddness(void)
261{
262#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
263 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
264 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
265#endif /* #ifdef CONFIG_TASKS_RCU */
266#ifdef CONFIG_TASKS_RCU
267 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
268#endif /* #ifdef CONFIG_TASKS_RCU */
269#ifdef CONFIG_TASKS_RUDE_RCU
270 pr_info("\tRude variant of Tasks RCU enabled.\n");
271#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
272#ifdef CONFIG_TASKS_TRACE_RCU
273 pr_info("\tTracing variant of Tasks RCU enabled.\n");
274#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
275}
276
277#endif /* #ifndef CONFIG_TINY_RCU */
278
279#ifndef CONFIG_TINY_RCU
280/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
281static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
282{
283 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
284 rtp->kname,
285 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
286 jiffies - data_race(rtp->gp_jiffies),
287 data_race(rtp->n_gps),
288 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
289 ".k"[!!data_race(rtp->kthread_ptr)],
290 ".C"[!!data_race(rtp->cbs_head)],
291 s);
292}
293#endif // #ifndef CONFIG_TINY_RCU
294
295static void exit_tasks_rcu_finish_trace(struct task_struct *t);
296
297#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
298
299////////////////////////////////////////////////////////////////////////
300//
301// Shared code between task-list-scanning variants of Tasks RCU.
302
303/* Wait for one RCU-tasks grace period. */
304static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
305{
306 struct task_struct *g, *t;
307 unsigned long lastreport;
308 LIST_HEAD(holdouts);
309 int fract;
310
311 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
312 rtp->pregp_func();
313
314 /*
315 * There were callbacks, so we need to wait for an RCU-tasks
316 * grace period. Start off by scanning the task list for tasks
317 * that are not already voluntarily blocked. Mark these tasks
318 * and make a list of them in holdouts.
319 */
320 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
321 rcu_read_lock();
322 for_each_process_thread(g, t)
323 rtp->pertask_func(t, &holdouts);
324 rcu_read_unlock();
325
326 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
327 rtp->postscan_func(&holdouts);
328
329 /*
330 * Each pass through the following loop scans the list of holdout
331 * tasks, removing any that are no longer holdouts. When the list
332 * is empty, we are done.
333 */
334 lastreport = jiffies;
335
336 // Start off with initial wait and slowly back off to 1 HZ wait.
337 fract = rtp->init_fract;
338
339 while (!list_empty(&holdouts)) {
340 bool firstreport;
341 bool needreport;
342 int rtst;
343
344 /* Slowly back off waiting for holdouts */
345 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
346 schedule_timeout_idle(fract);
347
348 if (fract < HZ)
349 fract++;
350
351 rtst = READ_ONCE(rcu_task_stall_timeout);
352 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
353 if (needreport)
354 lastreport = jiffies;
355 firstreport = true;
356 WARN_ON(signal_pending(current));
357 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
358 rtp->holdouts_func(&holdouts, needreport, &firstreport);
359 }
360
361 set_tasks_gp_state(rtp, RTGS_POST_GP);
362 rtp->postgp_func(rtp);
363}
364
365#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
366
367#ifdef CONFIG_TASKS_RCU
368
369////////////////////////////////////////////////////////////////////////
370//
371// Simple variant of RCU whose quiescent states are voluntary context
372// switch, cond_resched_rcu_qs(), user-space execution, and idle.
373// As such, grace periods can take one good long time. There are no
374// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
375// because this implementation is intended to get the system into a safe
376// state for some of the manipulations involved in tracing and the like.
377// Finally, this implementation does not support high call_rcu_tasks()
378// rates from multiple CPUs. If this is required, per-CPU callback lists
379// will be needed.
380//
381// The implementation uses rcu_tasks_wait_gp(), which relies on function
382// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
383// function sets these function pointers up so that rcu_tasks_wait_gp()
384// invokes these functions in this order:
385//
386// rcu_tasks_pregp_step():
387// Invokes synchronize_rcu() in order to wait for all in-flight
388// t->on_rq and t->nvcsw transitions to complete. This works because
389// all such transitions are carried out with interrupts disabled.
390// rcu_tasks_pertask(), invoked on every non-idle task:
391// For every runnable non-idle task other than the current one, use
392// get_task_struct() to pin down that task, snapshot that task's
393// number of voluntary context switches, and add that task to the
394// holdout list.
395// rcu_tasks_postscan():
396// Invoke synchronize_srcu() to ensure that all tasks that were
397// in the process of exiting (and which thus might not know to
398// synchronize with this RCU Tasks grace period) have completed
399// exiting.
400// check_all_holdout_tasks(), repeatedly until holdout list is empty:
401// Scans the holdout list, attempting to identify a quiescent state
402// for each task on the list. If there is a quiescent state, the
403// corresponding task is removed from the holdout list.
404// rcu_tasks_postgp():
405// Invokes synchronize_rcu() in order to ensure that all prior
406// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
407// to have happened before the end of this RCU Tasks grace period.
408// Again, this works because all such transitions are carried out
409// with interrupts disabled.
410//
411// For each exiting task, the exit_tasks_rcu_start() and
412// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
413// read-side critical sections waited for by rcu_tasks_postscan().
414//
415// Pre-grace-period update-side code is ordered before the grace period via
416// the ->cbs_lock and the smp_mb__after_spinlock(). Pre-grace-period read-side
417// code is ordered before the grace period via the synchronize_rcu() call
418// in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
419// disabling.
420
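/*
 * Editor's sketch (not part of the original file): one way a tracing-like
 * subsystem might use the flavor described above.  The my_*() helpers and
 * struct my_tramp are hypothetical; synchronize_rcu_tasks(), vfree(), and
 * kfree() are the real APIs being illustrated.  Once synchronize_rcu_tasks()
 * returns, every task that might have been running in (or preempted within)
 * the old trampoline has passed through a voluntary context switch, usermode,
 * or idle, so the trampoline may safely be freed.
 */
struct my_tramp {
        void *text;                             /* Hypothetical trampoline text. */
};

static void my_remove_trampoline(struct my_tramp *tp)
{
        my_unhook_trampoline(tp);               /* Hypothetical: no new entries. */
        synchronize_rcu_tasks();                /* Wait for one Tasks RCU grace period. */
        vfree(tp->text);
        kfree(tp);
}
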
421/* Pre-grace-period preparation. */
422static void rcu_tasks_pregp_step(void)
423{
424 /*
425 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
426 * to complete. Invoking synchronize_rcu() suffices because all
427 * these transitions occur with interrupts disabled. Without this
428 * synchronize_rcu(), a read-side critical section that started
429 * before the grace period might be incorrectly seen as having
430 * started after the grace period.
431 *
432 * This synchronize_rcu() also dispenses with the need for a
433 * memory barrier on the first store to t->rcu_tasks_holdout,
434 * as it forces the store to happen after the beginning of the
435 * grace period.
436 */
437 synchronize_rcu();
438}
439
440/* Per-task initial processing. */
441static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
442{
443 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
444 get_task_struct(t);
445 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
446 WRITE_ONCE(t->rcu_tasks_holdout, true);
447 list_add(&t->rcu_tasks_holdout_list, hop);
448 }
449}
450
451/* Processing between scanning the tasklist and draining the holdout list. */
452static void rcu_tasks_postscan(struct list_head *hop)
453{
454 /*
455 * Wait for tasks that are in the process of exiting. This
456 * does only part of the job, ensuring that all tasks that were
457 * previously exiting reach the point where they have disabled
458 * preemption, allowing the later synchronize_rcu() to finish
459 * the job.
460 */
461 synchronize_srcu(&tasks_rcu_exit_srcu);
462}
463
464/* See if tasks are still holding out, complain if so. */
465static void check_holdout_task(struct task_struct *t,
466 bool needreport, bool *firstreport)
467{
468 int cpu;
469
470 if (!READ_ONCE(t->rcu_tasks_holdout) ||
471 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
472 !READ_ONCE(t->on_rq) ||
473 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
474 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
475 WRITE_ONCE(t->rcu_tasks_holdout, false);
476 list_del_init(&t->rcu_tasks_holdout_list);
477 put_task_struct(t);
478 return;
479 }
480 rcu_request_urgent_qs_task(t);
481 if (!needreport)
482 return;
483 if (*firstreport) {
484 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
485 *firstreport = false;
486 }
487 cpu = task_cpu(t);
488 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
489 t, ".I"[is_idle_task(t)],
490 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
491 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
492 t->rcu_tasks_idle_cpu, cpu);
493 sched_show_task(t);
494}
495
496/* Scan the holdout list for tasks no longer holding out. */
497static void check_all_holdout_tasks(struct list_head *hop,
498 bool needreport, bool *firstreport)
499{
500 struct task_struct *t, *t1;
501
502 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
503 check_holdout_task(t, needreport, firstreport);
504 cond_resched();
505 }
506}
507
508/* Finish off the Tasks-RCU grace period. */
509static void rcu_tasks_postgp(struct rcu_tasks *rtp)
510{
511 /*
512 * Because ->on_rq and ->nvcsw are not guaranteed to have full
513 * memory barriers prior to them in the schedule() path, memory
514 * reordering on other CPUs could cause their RCU-tasks read-side
515 * critical sections to extend past the end of the grace period.
516 * However, because these ->nvcsw updates are carried out with
517 * interrupts disabled, we can use synchronize_rcu() to force the
518 * needed ordering on all such CPUs.
519 *
520 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
521 * accesses to be within the grace period, avoiding the need for
522 * memory barriers for ->rcu_tasks_holdout accesses.
523 *
524 * In addition, this synchronize_rcu() waits for exiting tasks
525 * to complete their final preempt_disable() region of execution,
526 * cleaning up after the synchronize_srcu() above.
527 */
528 synchronize_rcu();
529}
530
531void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
532DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
533
534/**
535 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
536 * @rhp: structure to be used for queueing the RCU updates.
537 * @func: actual callback function to be invoked after the grace period
538 *
539 * The callback function will be invoked some time after a full grace
540 * period elapses, in other words after all currently executing RCU
541 * read-side critical sections have completed. call_rcu_tasks() assumes
542 * that the read-side critical sections end at a voluntary context
543 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
544 * or transition to usermode execution. As such, there are no read-side
545 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
546 * this primitive is intended to determine that all tasks have passed
547 * through a safe state, not so much for data-structure synchronization.
548 *
549 * See the description of call_rcu() for more detailed information on
550 * memory ordering guarantees.
551 */
552void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
553{
554 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
555}
556EXPORT_SYMBOL_GPL(call_rcu_tasks);
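
/*
 * Editor's sketch (not part of the original file): the asynchronous
 * counterpart of the synchronous pattern, freeing a structure after a
 * Tasks RCU grace period without blocking the caller.  struct my_probe
 * and my_unlink_probe() are hypothetical; call_rcu_tasks(), container_of(),
 * and kfree() are the real APIs being illustrated.
 */
struct my_probe {
        struct rcu_head rh;
        void (*handler)(void);
};

static void my_probe_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_probe, rh));
}

static void my_remove_probe(struct my_probe *p)
{
        my_unlink_probe(p);                     /* Hypothetical: no new entries. */
        call_rcu_tasks(&p->rh, my_probe_free_cb);
}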
557
558/**
559 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
560 *
561 * Control will return to the caller some time after a full rcu-tasks
562 * grace period has elapsed, in other words after all currently
563 * executing rcu-tasks read-side critical sections have completed. These
564 * read-side critical sections are delimited by calls to schedule(),
565 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
566 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
567 *
568 * This is a very specialized primitive, intended only for a few uses in
569 * tracing and other situations requiring manipulation of function
570 * preambles and profiling hooks. The synchronize_rcu_tasks() function
571 * is not (yet) intended for heavy use from multiple CPUs.
572 *
573 * See the description of synchronize_rcu() for more detailed information
574 * on memory ordering guarantees.
575 */
576void synchronize_rcu_tasks(void)
577{
578 synchronize_rcu_tasks_generic(&rcu_tasks);
579}
580EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
581
582/**
583 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
584 *
585 * Although the current implementation is guaranteed to wait, it is not
586 * obligated to do so, for example, if there are no pending callbacks.
587 */
588void rcu_barrier_tasks(void)
589{
590 /* There is only one callback queue, so this is easy. ;-) */
591 synchronize_rcu_tasks();
592}
593EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
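
/*
 * Editor's sketch (not part of the original file): rcu_barrier_tasks() on a
 * hypothetical module-unload path.  Callbacks queued by call_rcu_tasks() may
 * reference module text or data, so the module waits for all previously
 * queued callbacks to be invoked before it can safely be removed.
 * my_remove_all_probes() is hypothetical.
 */
static void __exit my_module_exit(void)
{
        my_remove_all_probes();         /* Hypothetical: queues call_rcu_tasks() callbacks. */
        rcu_barrier_tasks();            /* Wait for all of those callbacks to be invoked. */
}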
594
595static int __init rcu_spawn_tasks_kthread(void)
596{
597 rcu_tasks.gp_sleep = HZ / 10;
598 rcu_tasks.init_fract = HZ / 10;
599 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
600 rcu_tasks.pertask_func = rcu_tasks_pertask;
601 rcu_tasks.postscan_func = rcu_tasks_postscan;
602 rcu_tasks.holdouts_func = check_all_holdout_tasks;
603 rcu_tasks.postgp_func = rcu_tasks_postgp;
604 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
605 return 0;
606}
607
608#if !defined(CONFIG_TINY_RCU)
609void show_rcu_tasks_classic_gp_kthread(void)
610{
611 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
612}
613EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
614#endif // !defined(CONFIG_TINY_RCU)
615
616/* Do the srcu_read_lock() for the above synchronize_srcu(). */
617void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
618{
619 preempt_disable();
620 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
621 preempt_enable();
622}
623
624/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
625void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
626{
627 struct task_struct *t = current;
628
629 preempt_disable();
630 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
631 preempt_enable();
632 exit_tasks_rcu_finish_trace(t);
633}
634
635#else /* #ifdef CONFIG_TASKS_RCU */
636void exit_tasks_rcu_start(void) { }
637void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
638#endif /* #else #ifdef CONFIG_TASKS_RCU */
639
640#ifdef CONFIG_TASKS_RUDE_RCU
641
642////////////////////////////////////////////////////////////////////////
643//
644// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
645// passing an empty function to schedule_on_each_cpu(). This approach
646// provides an asynchronous call_rcu_tasks_rude() API and batching
647// of concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
648// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
649// and induces otherwise unnecessary context switches on all online CPUs,
650// whether idle or not.
651//
652// Callback handling is provided by the rcu_tasks_kthread() function.
653//
654// Ordering is provided by the scheduler's context-switch code.
655
656// Empty function to allow workqueues to force a context switch.
657static void rcu_tasks_be_rude(struct work_struct *work)
658{
659}
660
661// Wait for one rude RCU-tasks grace period.
662static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
663{
664 rtp->n_ipis += cpumask_weight(cpu_online_mask);
665 schedule_on_each_cpu(rcu_tasks_be_rude);
666}
667
668void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
669DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
670 "RCU Tasks Rude");
671
672/**
673 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
674 * @rhp: structure to be used for queueing the RCU updates.
675 * @func: actual callback function to be invoked after the grace period
676 *
677 * The callback function will be invoked some time after a full grace
678 * period elapses, in other words after all currently executing RCU
679 * read-side critical sections have completed. call_rcu_tasks_rude()
680 * assumes that the read-side critical sections end at context switch,
681 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
682 * there are no read-side primitives analogous to rcu_read_lock() and
683 * rcu_read_unlock() because this primitive is intended to determine
684 * that all tasks have passed through a safe state, not so much for
685 * data-structure synchronization.
686 *
687 * See the description of call_rcu() for more detailed information on
688 * memory ordering guarantees.
689 */
690void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
691{
692 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
693}
694EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
695
696/**
697 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
698 *
699 * Control will return to the caller some time after a rude rcu-tasks
700 * grace period has elapsed, in other words after all currently
701 * executing rcu-tasks read-side critical sections have completed. These
702 * read-side critical sections are delimited by calls to schedule(),
703 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
704 * anyway) cond_resched().
705 *
706 * This is a very specialized primitive, intended only for a few uses in
707 * tracing and other situations requiring manipulation of function preambles
708 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
709 * (yet) intended for heavy use from multiple CPUs.
710 *
711 * See the description of synchronize_rcu() for more detailed information
712 * on memory ordering guarantees.
713 */
714void synchronize_rcu_tasks_rude(void)
715{
716 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
717}
718EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
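
/*
 * Editor's sketch (not part of the original file): a synchronous use of the
 * rude flavor.  Here the patched-out code can run with preemption disabled,
 * possibly from the idle loop, which the voluntary-context-switch flavor
 * above does not wait for.  Because such a region cannot be preempted,
 * forcing an actual context switch on every online CPU guarantees that any
 * such region that was in flight has completed.  The my_*() helpers are
 * hypothetical; synchronize_rcu_tasks_rude() is the real API.
 */
static void my_remove_nopreempt_hook(void)
{
        my_unhook_callsite();           /* Hypothetical: no new entries. */
        synchronize_rcu_tasks_rude();   /* Every online CPU has context-switched. */
        my_free_hook_text();            /* Hypothetical. */
}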
719
720/**
721 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
722 *
723 * Although the current implementation is guaranteed to wait, it is not
724 * obligated to do so, for example, if there are no pending callbacks.
725 */
726void rcu_barrier_tasks_rude(void)
727{
728 /* There is only one callback queue, so this is easy. ;-) */
729 synchronize_rcu_tasks_rude();
730}
731EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
732
733static int __init rcu_spawn_tasks_rude_kthread(void)
734{
735 rcu_tasks_rude.gp_sleep = HZ / 10;
736 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
737 return 0;
738}
739
740#if !defined(CONFIG_TINY_RCU)
741void show_rcu_tasks_rude_gp_kthread(void)
742{
743 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
744}
745EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
746#endif // !defined(CONFIG_TINY_RCU)
747#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
748
749////////////////////////////////////////////////////////////////////////
750//
751// Tracing variant of Tasks RCU. This variant is designed to be used
752// to protect tracing hooks, including those of BPF. This variant
753// therefore:
754//
755// 1. Has explicit read-side markers to allow finite grace periods
756// in the face of in-kernel loops for PREEMPT=n builds.
757//
758// 2. Protects code in the idle loop, exception entry/exit, and
759// CPU-hotplug code paths, similar to the capabilities of SRCU.
760//
761// 3. Avoids expensive read-side instructions, having overhead similar
762// to that of Preemptible RCU.
763//
764// There are of course downsides. The grace-period code can send IPIs to
765// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
766// It is necessary to scan the full tasklist, much as for Tasks RCU. There
767// is a single callback queue guarded by a single lock, again, much as for
768// Tasks RCU. If needed, these downsides can be at least partially remedied.
769//
770// Perhaps most important, this variant of RCU does not affect the vanilla
771// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
772// readers can operate from idle, offline, and exception entry/exit in no
773// way allows rcu_preempt and rcu_sched readers to also do so.
774//
775// The implementation uses rcu_tasks_wait_gp(), which relies on function
776// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
777// function sets these function pointers up so that rcu_tasks_wait_gp()
778// invokes these functions in this order:
779//
780// rcu_tasks_trace_pregp_step():
781// Initialize the count of readers and block CPU-hotplug operations.
782// rcu_tasks_trace_pertask(), invoked on every non-idle task:
783// Initialize per-task state and attempt to identify an immediate
784// quiescent state for that task, or, failing that, attempt to
785// set that task's .need_qs flag so that task's next outermost
786// rcu_read_unlock_trace() will report the quiescent state (in which
787// case the count of readers is incremented). If both attempts fail,
788// the task is added to a "holdout" list.
789// rcu_tasks_trace_postscan():
790// Initialize state and attempt to identify an immediate quiescent
791// state as above (but only for idle tasks), unblock CPU-hotplug
792// operations, and wait for an RCU grace period to avoid races with
793// tasks that are in the process of exiting.
794// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
795// Scans the holdout list, attempting to identify a quiescent state
796// for each task on the list. If there is a quiescent state, the
797// corresponding task is removed from the holdout list.
798// rcu_tasks_trace_postgp():
799// Wait for the count of readers to drop to zero, reporting any stalls.
800// Also execute full memory barriers to maintain ordering with code
801// executing after the grace period.
802//
803// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
804//
805// Pre-grace-period update-side code is ordered before the grace
806// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
807// Pre-grace-period read-side code is ordered before the grace period by
808// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
809// scheduler context-switch ordering (for locked-down non-running readers).
810
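/*
 * Editor's sketch (not part of the original file): the reader/updater
 * pattern the above description targets, for example a BPF-style hook
 * invoked from a tracepoint.  The my_hook names are hypothetical;
 * rcu_read_lock_trace(), rcu_read_unlock_trace(), rcu_read_lock_trace_held(),
 * rcu_replace_pointer(), and call_rcu_tasks_trace() are the real APIs
 * being illustrated.
 */
struct my_hook {
        struct rcu_head rh;
        void (*func)(void);
};

static struct my_hook __rcu *my_hook_ptr;       /* Hypothetical global hook. */

static void my_call_hook(void)  /* Reader: may run from idle or entry code. */
{
        struct my_hook *h;

        rcu_read_lock_trace();
        h = rcu_dereference_check(my_hook_ptr, rcu_read_lock_trace_held());
        if (h)
                h->func();
        rcu_read_unlock_trace();
}

static void my_hook_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_hook, rh));
}

static void my_remove_hook(void)        /* Updater. */
{
        struct my_hook *h = rcu_replace_pointer(my_hook_ptr, NULL, true);

        if (h)
                call_rcu_tasks_trace(&h->rh, my_hook_free_cb);
}
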
811// The lockdep state must be outside of #ifdef to be useful.
812#ifdef CONFIG_DEBUG_LOCK_ALLOC
813static struct lock_class_key rcu_lock_trace_key;
814struct lockdep_map rcu_trace_lock_map =
815 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
816EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
817#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
818
819#ifdef CONFIG_TASKS_TRACE_RCU
820
821static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
822static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
823
824// Record outstanding IPIs to each CPU. No point in sending two...
825static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
826
827// The number of detections of task quiescent state relying on
828// heavyweight readers executing explicit memory barriers.
829static unsigned long n_heavy_reader_attempts;
830static unsigned long n_heavy_reader_updates;
831static unsigned long n_heavy_reader_ofl_updates;
832
833void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
834DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
835 "RCU Tasks Trace");
836
837/*
838 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
839 * while the scheduler locks are held.
840 */
841static void rcu_read_unlock_iw(struct irq_work *iwp)
842{
843 wake_up(&trc_wait);
844}
845static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
846
847/* If we are the last reader, wake up the grace-period kthread. */
848void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
849{
850 int nq = t->trc_reader_special.b.need_qs;
851
852 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
853 t->trc_reader_special.b.need_mb)
854 smp_mb(); // Pairs with update-side barriers.
855 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
856 if (nq)
857 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
858 WRITE_ONCE(t->trc_reader_nesting, nesting);
859 if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
860 irq_work_queue(&rcu_tasks_trace_iw);
861}
862EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
863
864/* Add a task to the holdout list, if it is not already on the list. */
865static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
866{
867 if (list_empty(&t->trc_holdout_list)) {
868 get_task_struct(t);
869 list_add(&t->trc_holdout_list, bhp);
870 }
871}
872
873/* Remove a task from the holdout list, if it is in fact present. */
874static void trc_del_holdout(struct task_struct *t)
875{
876 if (!list_empty(&t->trc_holdout_list)) {
877 list_del_init(&t->trc_holdout_list);
878 put_task_struct(t);
879 }
880}
881
882/* IPI handler to check task state. */
883static void trc_read_check_handler(void *t_in)
884{
885 struct task_struct *t = current;
886 struct task_struct *texp = t_in;
887
888 // If the task is no longer running on this CPU, leave.
889 if (unlikely(texp != t)) {
890 if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
891 wake_up(&trc_wait);
892 goto reset_ipi; // Already on holdout list, so will check later.
893 }
894
895 // If the task is not in a read-side critical section, and
896 // if this is the last reader, awaken the grace-period kthread.
897 if (likely(!t->trc_reader_nesting)) {
898 if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
899 wake_up(&trc_wait);
900 // Mark as checked after decrement to avoid false
901 // positives on the above WARN_ON_ONCE().
902 WRITE_ONCE(t->trc_reader_checked, true);
903 goto reset_ipi;
904 }
905 // If we are racing with an rcu_read_unlock_trace(), try again later.
906 if (unlikely(t->trc_reader_nesting < 0)) {
907 if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
908 wake_up(&trc_wait);
909 goto reset_ipi;
910 }
911 WRITE_ONCE(t->trc_reader_checked, true);
912
913 // Get here if the task is in a read-side critical section. Set
914 // its state so that it will awaken the grace-period kthread upon
915 // exit from that critical section.
916 WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
917 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
918
919reset_ipi:
920 // Allow future IPIs to be sent on CPU and for task.
921 // Also order this IPI handler against any later manipulations of
922 // the intended task.
923 smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
924 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
925}
926
927/* Callback function for scheduler to check locked-down task. */
928static bool trc_inspect_reader(struct task_struct *t, void *arg)
929{
930 int cpu = task_cpu(t);
931 bool in_qs = false;
932 bool ofl = cpu_is_offline(cpu);
933
934 if (task_curr(t)) {
935 WARN_ON_ONCE(ofl && !is_idle_task(t));
936
937 // If no chance of heavyweight readers, do it the hard way.
938 if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
939 return false;
940
941 // If heavyweight readers are enabled on the remote task,
942 // we can inspect its state even though it is currently running.
943 // However, we cannot safely change its state.
944 n_heavy_reader_attempts++;
945 if (!ofl && // Check for "running" idle tasks on offline CPUs.
946 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
947 return false; // No quiescent state, do it the hard way.
948 n_heavy_reader_updates++;
949 if (ofl)
950 n_heavy_reader_ofl_updates++;
951 in_qs = true;
952 } else {
953 in_qs = likely(!t->trc_reader_nesting);
954 }
955
956 // Mark as checked so that the grace-period kthread will
957 // remove it from the holdout list.
958 t->trc_reader_checked = true;
959
960 if (in_qs)
961 return true; // Already in quiescent state, done!!!
962
963 // The task is in a read-side critical section, so set up its
964 // state so that it will awaken the grace-period kthread upon exit
965 // from that critical section.
966 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
967 WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
968 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
969 return true;
970}
971
972/* Attempt to extract the state for the specified task. */
973static void trc_wait_for_one_reader(struct task_struct *t,
974 struct list_head *bhp)
975{
976 int cpu;
977
978 // If a previous IPI is still in flight, let it complete.
979 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
980 return;
981
982 // The current task had better be in a quiescent state.
983 if (t == current) {
984 t->trc_reader_checked = true;
985 WARN_ON_ONCE(t->trc_reader_nesting);
986 return;
987 }
988
989 // Attempt to nail down the task for inspection.
990 get_task_struct(t);
991 if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
992 put_task_struct(t);
993 return;
994 }
995 put_task_struct(t);
996
997 // If the task is currently running, send an IPI; either way, add it to the list.
998 trc_add_holdout(t, bhp);
999 if (task_curr(t) &&
1000 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1001 // The task is currently running, so try IPIing it.
1002 cpu = task_cpu(t);
1003
1004 // If there is already an IPI outstanding, let it happen.
1005 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1006 return;
1007
1008 atomic_inc(&trc_n_readers_need_end);
1009 per_cpu(trc_ipi_to_cpu, cpu) = true;
1010 t->trc_ipi_to_cpu = cpu;
1011 rcu_tasks_trace.n_ipis++;
1012 if (smp_call_function_single(cpu,
1013 trc_read_check_handler, t, 0)) {
1014 // Just in case there is some other reason for
1015 // failure than the target CPU being offline.
1016 rcu_tasks_trace.n_ipis_fails++;
1017 per_cpu(trc_ipi_to_cpu, cpu) = false;
1018 t->trc_ipi_to_cpu = -1; // Reset so that later scans can retry.
1019 if (atomic_dec_and_test(&trc_n_readers_need_end)) {
1020 WARN_ON_ONCE(1);
1021 wake_up(&trc_wait);
1022 }
1023 }
1024 }
1025}
1026
1027/* Initialize for a new RCU-tasks-trace grace period. */
1028static void rcu_tasks_trace_pregp_step(void)
1029{
1030 int cpu;
1031
1032 // Allow for fast-acting IPIs.
1033 atomic_set(&trc_n_readers_need_end, 1);
1034
1035 // There shouldn't be any old IPIs, but...
1036 for_each_possible_cpu(cpu)
1037 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1038
1039 // Disable CPU hotplug across the tasklist scan.
1040 // This also waits for all readers in CPU-hotplug code paths.
1041 cpus_read_lock();
1042}
1043
1044/* Do first-round processing for the specified task. */
1045static void rcu_tasks_trace_pertask(struct task_struct *t,
1046 struct list_head *hop)
1047{
1048 // During early boot when there is only the one boot CPU, there
1049 // is no idle task for the other CPUs. Just return.
1050 if (unlikely(t == NULL))
1051 return;
1052
1053 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1054 WRITE_ONCE(t->trc_reader_checked, false);
1055 t->trc_ipi_to_cpu = -1;
1056 trc_wait_for_one_reader(t, hop);
1057}
1058
1059/*
1060 * Do intermediate processing between task and holdout scans and
1061 * pick up the idle tasks.
1062 */
1063static void rcu_tasks_trace_postscan(struct list_head *hop)
1064{
1065 int cpu;
1066
1067 for_each_possible_cpu(cpu)
1068 rcu_tasks_trace_pertask(idle_task(cpu), hop);
1069
1070 // Re-enable CPU hotplug now that the tasklist scan has completed.
1071 cpus_read_unlock();
1072
1073 // Wait for late-stage exiting tasks to finish exiting.
1074 // These might have passed the call to exit_tasks_rcu_finish().
1075 synchronize_rcu();
1076 // Any tasks that exit after this point will set ->trc_reader_checked.
1077}
1078
1079/* Show the state of a task stalling the current RCU tasks trace GP. */
1080static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1081{
1082 int cpu;
1083
1084 if (*firstreport) {
1085 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1086 *firstreport = false;
1087 }
1088 // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
1089 cpu = task_cpu(t);
1090 pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1091 t->pid,
1092 ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0],
1093 ".i"[is_idle_task(t)],
1094 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1095 t->trc_reader_nesting,
1096 " N"[!!t->trc_reader_special.b.need_qs],
1097 cpu);
1098 sched_show_task(t);
1099}
1100
1101/* List stalled IPIs for RCU tasks trace. */
1102static void show_stalled_ipi_trace(void)
1103{
1104 int cpu;
1105
1106 for_each_possible_cpu(cpu)
1107 if (per_cpu(trc_ipi_to_cpu, cpu))
1108 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1109}
1110
1111/* Do one scan of the holdout list. */
1112static void check_all_holdout_tasks_trace(struct list_head *hop,
1113 bool needreport, bool *firstreport)
1114{
1115 struct task_struct *g, *t;
1116
1117 // Disable CPU hotplug across the holdout list scan.
1118 cpus_read_lock();
1119
1120 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1121 // If safe and needed, try to check the current task.
1122 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1123 !READ_ONCE(t->trc_reader_checked))
1124 trc_wait_for_one_reader(t, hop);
1125
1126 // If check succeeded, remove this task from the list.
1127 if (READ_ONCE(t->trc_reader_checked))
1128 trc_del_holdout(t);
1129 else if (needreport)
1130 show_stalled_task_trace(t, firstreport);
1131 }
1132
1133 // Re-enable CPU hotplug now that the holdout list scan has completed.
1134 cpus_read_unlock();
1135
1136 if (needreport) {
1137 if (*firstreport)
1138 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1139 show_stalled_ipi_trace();
1140 }
1141}
1142
1143/* Wait for grace period to complete and provide ordering. */
1144static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1145{
1146 bool firstreport;
1147 struct task_struct *g, *t;
1148 LIST_HEAD(holdouts);
1149 long ret;
1150
1151 // Remove the safety count.
1152 smp_mb__before_atomic(); // Order vs. earlier atomics
1153 atomic_dec(&trc_n_readers_need_end);
1154 smp_mb__after_atomic(); // Order vs. later atomics
1155
1156 // Wait for readers.
1157 set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1158 for (;;) {
1159 ret = wait_event_idle_exclusive_timeout(
1160 trc_wait,
1161 atomic_read(&trc_n_readers_need_end) == 0,
1162 READ_ONCE(rcu_task_stall_timeout));
1163 if (ret)
1164 break; // Count reached zero.
1165 // Stall warning time, so make a list of the offenders.
1166 rcu_read_lock();
1167 for_each_process_thread(g, t)
1168 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1169 trc_add_holdout(t, &holdouts);
1170 rcu_read_unlock();
1171 firstreport = true;
1172 list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1173 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1174 show_stalled_task_trace(t, &firstreport);
1175 trc_del_holdout(t); // Release task_struct reference.
1176 }
1177 if (firstreport)
1178 pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
1179 show_stalled_ipi_trace();
1180 pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1181 }
1182 smp_mb(); // Caller's code must be ordered after wakeup.
1183 // Pairs with pretty much every ordering primitive.
1184}
1185
1186/* Report any needed quiescent state for this exiting task. */
1187static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1188{
1189 WRITE_ONCE(t->trc_reader_checked, true);
1190 WARN_ON_ONCE(t->trc_reader_nesting);
1191 WRITE_ONCE(t->trc_reader_nesting, 0);
1192 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1193 rcu_read_unlock_trace_special(t, 0);
1194}
1195
1196/**
1197 * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
1198 * @rhp: structure to be used for queueing the RCU updates.
1199 * @func: actual callback function to be invoked after the grace period
1200 *
1201 * The callback function will be invoked some time after a full grace
1202 * period elapses, in other words after all currently executing RCU
1203 * read-side critical sections have completed. call_rcu_tasks_trace()
1204 * assumes that the read-side critical sections are delimited by calls
1205 * to rcu_read_lock_trace() and rcu_read_unlock_trace(). Unlike the other
1206 * Tasks RCU flavors, this flavor therefore does provide explicit read-side
1207 * markers, but like them it is intended to determine that all tasks have
1208 * passed through a safe state, not so much for fine-grained data-structure
1209 * synchronization.
1210 *
1211 * See the description of call_rcu() for more detailed information on
1212 * memory ordering guarantees.
1213 */
1214void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1215{
1216 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1217}
1218EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1219
1220/**
1221 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1222 *
1223 * Control will return to the caller some time after a trace rcu-tasks
1224 * grace period has elapsed, in other words after all currently executing
1225 * rcu-tasks read-side critical sections have completed. These read-side
1226 * critical sections are delimited by calls to rcu_read_lock_trace()
1227 * and rcu_read_unlock_trace().
1228 *
1229 * This is a very specialized primitive, intended only for a few uses in
1230 * tracing and other situations requiring manipulation of function preambles
1231 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1232 * (yet) intended for heavy use from multiple CPUs.
1233 *
1234 * See the description of synchronize_rcu() for more detailed information
1235 * on memory ordering guarantees.
1236 */
1237void synchronize_rcu_tasks_trace(void)
1238{
1239 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1240 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1241}
1242EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1243
1244/**
1245 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1246 *
1247 * Although the current implementation is guaranteed to wait, it is not
1248 * obligated to do so, for example, if there are no pending callbacks.
1249 */
1250void rcu_barrier_tasks_trace(void)
1251{
1252 /* There is only one callback queue, so this is easy. ;-) */
1253 synchronize_rcu_tasks_trace();
1254}
1255EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1256
1257static int __init rcu_spawn_tasks_trace_kthread(void)
1258{
1259 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1260 rcu_tasks_trace.gp_sleep = HZ / 10;
1261 rcu_tasks_trace.init_fract = HZ / 10;
1262 } else {
1263 rcu_tasks_trace.gp_sleep = HZ / 200;
1264 if (rcu_tasks_trace.gp_sleep <= 0)
1265 rcu_tasks_trace.gp_sleep = 1;
1266 rcu_tasks_trace.init_fract = HZ / 200;
1267 if (rcu_tasks_trace.init_fract <= 0)
1268 rcu_tasks_trace.init_fract = 1;
1269 }
1270 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1271 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1272 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1273 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1274 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1275 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1276 return 0;
1277}
1278
1279#if !defined(CONFIG_TINY_RCU)
1280void show_rcu_tasks_trace_gp_kthread(void)
1281{
1282 char buf[64];
1283
1284 snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1285 data_race(n_heavy_reader_ofl_updates),
1286 data_race(n_heavy_reader_updates),
1287 data_race(n_heavy_reader_attempts));
1288 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1289}
1290EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1291#endif // !defined(CONFIG_TINY_RCU)
1292
1293#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1294static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1295#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1296
1297#ifndef CONFIG_TINY_RCU
1298void show_rcu_tasks_gp_kthreads(void)
1299{
1300 show_rcu_tasks_classic_gp_kthread();
1301 show_rcu_tasks_rude_gp_kthread();
1302 show_rcu_tasks_trace_gp_kthread();
1303}
1304#endif /* #ifndef CONFIG_TINY_RCU */
1305
1306#ifdef CONFIG_PROVE_RCU
1307struct rcu_tasks_test_desc {
1308 struct rcu_head rh;
1309 const char *name;
1310 bool notrun;
1311};
1312
1313static struct rcu_tasks_test_desc tests[] = {
1314 {
1315 .name = "call_rcu_tasks()",
1316 /* If not defined, the test is skipped. */
1317 .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1318 },
1319 {
1320 .name = "call_rcu_tasks_rude()",
1321 /* If not defined, the test is skipped. */
1322 .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1323 },
1324 {
1325 .name = "call_rcu_tasks_trace()",
1326 /* If not defined, the test is skipped. */
1327 .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1328 }
1329};
1330
1331static void test_rcu_tasks_callback(struct rcu_head *rhp)
1332{
1333 struct rcu_tasks_test_desc *rttd =
1334 container_of(rhp, struct rcu_tasks_test_desc, rh);
1335
1336 pr_info("Callback from %s invoked.\n", rttd->name);
1337
1338 rttd->notrun = true;
1339}
1340
1341static void rcu_tasks_initiate_self_tests(void)
1342{
1343 pr_info("Running RCU-tasks wait API self tests\n");
1344#ifdef CONFIG_TASKS_RCU
1345 synchronize_rcu_tasks();
1346 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1347#endif
1348
1349#ifdef CONFIG_TASKS_RUDE_RCU
1350 synchronize_rcu_tasks_rude();
1351 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1352#endif
1353
1354#ifdef CONFIG_TASKS_TRACE_RCU
1355 synchronize_rcu_tasks_trace();
1356 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1357#endif
1358}
1359
1360static int rcu_tasks_verify_self_tests(void)
1361{
1362 int ret = 0;
1363 int i;
1364
1365 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1366 if (!tests[i].notrun) { // still hanging.
1367 pr_err("%s has failed.\n", tests[i].name);
1368 ret = -1;
1369 }
1370 }
1371
1372 if (ret)
1373 WARN_ON(1);
1374
1375 return ret;
1376}
1377late_initcall(rcu_tasks_verify_self_tests);
1378#else /* #ifdef CONFIG_PROVE_RCU */
1379static void rcu_tasks_initiate_self_tests(void) { }
1380#endif /* #else #ifdef CONFIG_PROVE_RCU */
1381
1382void __init rcu_init_tasks_generic(void)
1383{
1384#ifdef CONFIG_TASKS_RCU
1385 rcu_spawn_tasks_kthread();
1386#endif
1387
1388#ifdef CONFIG_TASKS_RUDE_RCU
1389 rcu_spawn_tasks_rude_kthread();
1390#endif
1391
1392#ifdef CONFIG_TASKS_TRACE_RCU
1393 rcu_spawn_tasks_trace_kthread();
1394#endif
1395
1396 // Run the self-tests.
1397 rcu_tasks_initiate_self_tests();
1398}
1399
1400#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1401static inline void rcu_tasks_bootup_oddness(void) {}
1402#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */